// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#include <linux/rhashtable.h>

static void mesh_path_free_rcu(struct mesh_table *tbl, struct mesh_path *mpath);

static u32 mesh_table_hash(const void *addr, u32 len, u32 seed)
{
	/* Use last four bytes of hw addr as hash index: the first three
	 * bytes of an EUI-48 carry the OUI, so the low-order bytes vary
	 * the most between stations.
	 */
	return jhash_1word(__get_unaligned_cpu32((u8 *)addr + 2), seed);
}

static const struct rhashtable_params mesh_rht_params = {
	.nelem_hint = 2,
	.automatic_shrinking = true,
	.key_len = ETH_ALEN,
	.key_offset = offsetof(struct mesh_path, dst),
	.head_offset = offsetof(struct mesh_path, rhash),
	.hashfn = mesh_table_hash,
};

static const struct rhashtable_params fast_tx_rht_params = {
	.nelem_hint = 10,
	.automatic_shrinking = true,
	.key_len = sizeof_field(struct ieee80211_mesh_fast_tx, key),
	.key_offset = offsetof(struct ieee80211_mesh_fast_tx, key),
	.head_offset = offsetof(struct ieee80211_mesh_fast_tx, rhash),
	.hashfn = mesh_table_hash,
};

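/* Both tables use mesh_table_hash(), which only mixes bytes 2..5 of the
 * key, i.e. the low four bytes of the destination address; the remaining
 * key bytes (including the fast-tx key type) still take part in
 * rhashtable's memcmp() based key comparison.
 */
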
static void __mesh_fast_tx_entry_free(void *ptr, void *tblptr)
{
	struct ieee80211_mesh_fast_tx *entry = ptr;

	kfree_rcu(entry, fast_tx.rcu_head);
}

static void mesh_fast_tx_deinit(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_free_and_destroy(&cache->rht,
				    __mesh_fast_tx_entry_free, NULL);
}

static void mesh_fast_tx_init(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	rhashtable_init(&cache->rht, &fast_tx_rht_params);
	INIT_HLIST_HEAD(&cache->walk_head);
	spin_lock_init(&cache->walk_lock);
}

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

static void mesh_path_rht_free(void *ptr, void *tblptr)
{
	struct mesh_path *mpath = ptr;
	struct mesh_table *tbl = tblptr;

	mesh_path_free_rcu(tbl, mpath);
}

static void mesh_table_init(struct mesh_table *tbl)
{
	INIT_HLIST_HEAD(&tbl->known_gates);
	INIT_HLIST_HEAD(&tbl->walk_head);
	atomic_set(&tbl->entries, 0);
	spin_lock_init(&tbl->gates_lock);
	spin_lock_init(&tbl->walk_lock);

	/* rhashtable_init() may fail only in case of wrong
	 * mesh_rht_params
	 */
	WARN_ON(rhashtable_init(&tbl->rhead, &mesh_rht_params));
}

static void mesh_table_free(struct mesh_table *tbl)
{
	rhashtable_free_and_destroy(&tbl->rhead,
				    mesh_path_rht_free, tbl);
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

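/* The fixed part of the mesh control (ieee80211s) header is 6 bytes:
 * flags(1) | TTL(1) | seqnum(4).  Depending on the AE bits in flags it is
 * followed by one (AE = A4) or two (AE = A5/A6) extended addresses.
 */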
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}

static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;

	mpath = rhashtable_lookup(&tbl->rhead, dst, mesh_rht_params);

	if (mpath && mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mesh_paths, dst, sdata);
}

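/*
 * Typical caller pattern (illustrative sketch, not a verbatim call site):
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
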
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(&sdata->u.mesh.mpp_paths, dst, sdata);
}

static struct mesh_path *
__mesh_path_lookup_by_idx(struct mesh_table *tbl, int idx)
{
	int i = 0;
	struct mesh_path *mpath;

	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (i++ == idx)
			break;
	}

	if (!mpath)
		return NULL;

	if (mpath_expired(mpath)) {
		spin_lock_bh(&mpath->state_lock);
		mpath->flags &= ~MESH_PATH_ACTIVE;
		spin_unlock_bh(&mpath->state_lock);
	}
	return mpath;
}

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mesh_paths, idx);
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @sdata: local subif
 * @idx: index
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	return __mesh_path_lookup_by_idx(&sdata->u.mesh.mpp_paths, idx);
}

/**
 * mesh_path_add_gate - mark the given mpath as a gate and add it to our list of known gates
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if @mpath is already marked as a gate
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	int err;

	rcu_read_lock();
	tbl = &mpath->sdata->u.mesh.mesh_paths;

	spin_lock_bh(&mpath->state_lock);
	if (mpath->is_gate) {
		err = -EEXIST;
		spin_unlock_bh(&mpath->state_lock);
		goto err_rcu;
	}
	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;

	spin_lock(&tbl->gates_lock);
	hlist_add_head_rcu(&mpath->gate_list, &tbl->known_gates);
	spin_unlock(&tbl->gates_lock);

	spin_unlock_bh(&mpath->state_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	lockdep_assert_held(&mpath->state_lock);
	if (!mpath->is_gate)
		return;

	mpath->is_gate = false;
	spin_lock_bh(&tbl->gates_lock);
	hlist_del_rcu(&mpath->gate_list);
	mpath->sdata->u.mesh.num_gates--;
	spin_unlock_bh(&tbl->gates_lock);

	mpath_dbg(mpath->sdata,
		  "Mesh path: Deleted gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

static
struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
				const u8 *dst, gfp_t gfp_flags)
{
	struct mesh_path *new_mpath;

	new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
	if (!new_mpath)
		return NULL;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	timer_setup(&new_mpath->timer, mesh_path_timer, 0);

	return new_mpath;
}

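/* caller must hold cache->walk_lock */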
static void mesh_fast_tx_entry_free(struct mesh_tx_cache *cache,
				    struct ieee80211_mesh_fast_tx *entry)
{
	hlist_del_rcu(&entry->walk_list);
	rhashtable_remove_fast(&cache->rht, &entry->rhash, fast_tx_rht_params);
	kfree_rcu(entry, fast_tx.rcu_head);
}

struct ieee80211_mesh_fast_tx *
mesh_fast_tx_get(struct ieee80211_sub_if_data *sdata,
		 struct ieee80211_mesh_fast_tx_key *key)
{
	struct ieee80211_mesh_fast_tx *entry;
	struct mesh_tx_cache *cache;

	cache = &sdata->u.mesh.tx_cache;
	entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
	if (!entry)
		return NULL;

	if (!(entry->mpath->flags & MESH_PATH_ACTIVE) ||
	    mpath_expired(entry->mpath)) {
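		/* Look the entry up again under the walk lock before
		 * freeing it: it may have been removed by a concurrent
		 * flush since the lockless lookup above.
		 */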
		spin_lock_bh(&cache->walk_lock);
		entry = rhashtable_lookup(&cache->rht, key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
		spin_unlock_bh(&cache->walk_lock);
		return NULL;
	}

	mesh_path_refresh(sdata, entry->mpath, NULL);
	if (entry->mppath)
		entry->mppath->exp_time = jiffies;
	entry->timestamp = jiffies;

	return entry;
}

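/*
 * Build a fast-tx header template for frames going through @mpath and add it
 * to the tx cache, so that subsequent frames to the same destination can be
 * sent via mesh_fast_tx_get() without rebuilding the full header.
 */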
void mesh_fast_tx_cache(struct ieee80211_sub_if_data *sdata,
			struct sk_buff *skb, struct mesh_path *mpath)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_mesh_fast_tx *entry, *prev;
	struct ieee80211_mesh_fast_tx build = {};
	struct ieee80211s_hdr *meshhdr;
	struct mesh_tx_cache *cache;
	struct ieee80211_key *key;
	struct mesh_path *mppath;
	struct sta_info *sta;
	u8 *qc;

	if (sdata->noack_map ||
	    !ieee80211_is_data_qos(hdr->frame_control))
		return;

	build.fast_tx.hdr_len = ieee80211_hdrlen(hdr->frame_control);
	meshhdr = (struct ieee80211s_hdr *)(skb->data + build.fast_tx.hdr_len);
	build.hdrlen = ieee80211_get_mesh_hdrlen(meshhdr);

	cache = &sdata->u.mesh.tx_cache;
	if (atomic_read(&cache->rht.nelems) >= MESH_FAST_TX_CACHE_MAX_SIZE)
		return;

	sta = rcu_dereference(mpath->next_hop);
	if (!sta)
		return;

	build.key.type = MESH_FAST_TX_TYPE_LOCAL;
	if ((meshhdr->flags & MESH_FLAGS_AE) == MESH_FLAGS_AE_A5_A6) {
		/* This is required to keep the mppath alive */
		mppath = mpp_path_lookup(sdata, meshhdr->eaddr1);
		if (!mppath)
			return;
		build.mppath = mppath;
		if (!ether_addr_equal(meshhdr->eaddr2, sdata->vif.addr))
			build.key.type = MESH_FAST_TX_TYPE_PROXIED;
	} else if (ieee80211_has_a4(hdr->frame_control)) {
		mppath = mpath;
	} else {
		return;
	}

	if (!ether_addr_equal(hdr->addr4, sdata->vif.addr))
		build.key.type = MESH_FAST_TX_TYPE_FORWARDED;

	/* rate limit, in case fast xmit can't be enabled */
	if (mppath->fast_tx_check == jiffies)
		return;

	mppath->fast_tx_check = jiffies;

	/*
	 * Same use of the sta lock as in ieee80211_check_fast_xmit, in order
	 * to protect against concurrent sta key updates.
	 */
	spin_lock_bh(&sta->lock);
	key = rcu_access_pointer(sta->ptk[sta->ptk_idx]);
	if (!key)
		key = rcu_access_pointer(sdata->default_unicast_key);
	build.fast_tx.key = key;

	if (key) {
		bool gen_iv, iv_spc;

		gen_iv = key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV;
		iv_spc = key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE;

		if (!(key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ||
		    (key->flags & KEY_FLAG_TAINTED))
			goto unlock_sta;

		switch (key->conf.cipher) {
		case WLAN_CIPHER_SUITE_CCMP:
		case WLAN_CIPHER_SUITE_CCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_CCMP_HDR_LEN;
			break;
		case WLAN_CIPHER_SUITE_GCMP:
		case WLAN_CIPHER_SUITE_GCMP_256:
			if (gen_iv)
				build.fast_tx.pn_offs = build.fast_tx.hdr_len;
			if (gen_iv || iv_spc)
				build.fast_tx.hdr_len += IEEE80211_GCMP_HDR_LEN;
			break;
		default:
			goto unlock_sta;
		}
	}

	memcpy(build.key.addr, mppath->dst, ETH_ALEN);
	build.timestamp = jiffies;
	build.fast_tx.band = info->band;
	build.fast_tx.da_offs = offsetof(struct ieee80211_hdr, addr3);
	build.fast_tx.sa_offs = offsetof(struct ieee80211_hdr, addr4);
	build.mpath = mpath;
	memcpy(build.hdr, meshhdr, build.hdrlen);
	memcpy(build.hdr + build.hdrlen, rfc1042_header, sizeof(rfc1042_header));
	build.hdrlen += sizeof(rfc1042_header);
	memcpy(build.fast_tx.hdr, hdr, build.fast_tx.hdr_len);

	hdr = (struct ieee80211_hdr *)build.fast_tx.hdr;
	if (build.fast_tx.key)
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);

	qc = ieee80211_get_qos_ctl(hdr);
	qc[1] |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT >> 8;

	entry = kmemdup(&build, sizeof(build), GFP_ATOMIC);
	if (!entry)
		goto unlock_sta;

	spin_lock(&cache->walk_lock);
	prev = rhashtable_lookup_get_insert_fast(&cache->rht,
						 &entry->rhash,
						 fast_tx_rht_params);
	if (unlikely(IS_ERR(prev))) {
		kfree(entry);
		goto unlock_cache;
	}

	/*
	 * replace any previous entry in the hash table, in case we're
	 * replacing it with a different type (e.g. mpath -> mpp)
	 */
	if (unlikely(prev)) {
		rhashtable_replace_fast(&cache->rht, &prev->rhash,
					&entry->rhash, fast_tx_rht_params);
		hlist_del_rcu(&prev->walk_list);
		kfree_rcu(prev, fast_tx.rcu_head);
	}

	hlist_add_head(&entry->walk_list, &cache->walk_head);

unlock_cache:
	spin_unlock(&cache->walk_lock);
unlock_sta:
	spin_unlock_bh(&sta->lock);
}

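/* Age out stale cache entries; this is a cheap no-op until the cache has
 * grown past MESH_FAST_TX_CACHE_THRESHOLD_SIZE.
 */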
void mesh_fast_tx_gc(struct ieee80211_sub_if_data *sdata)
{
	unsigned long timeout = msecs_to_jiffies(MESH_FAST_TX_CACHE_TIMEOUT);
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	if (atomic_read(&cache->rht.nelems) < MESH_FAST_TX_CACHE_THRESHOLD_SIZE)
		return;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (!time_is_after_jiffies(entry->timestamp + timeout))
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_mpath(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (entry->mpath == mpath)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_sta(struct ieee80211_sub_if_data *sdata,
			    struct sta_info *sta)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx *entry;
	struct hlist_node *n;

	spin_lock_bh(&cache->walk_lock);
	hlist_for_each_entry_safe(entry, n, &cache->walk_head, walk_list)
		if (rcu_access_pointer(entry->mpath->next_hop) == sta)
			mesh_fast_tx_entry_free(cache, entry);
	spin_unlock_bh(&cache->walk_lock);
}

void mesh_fast_tx_flush_addr(struct ieee80211_sub_if_data *sdata,
			     const u8 *addr)
{
	struct mesh_tx_cache *cache = &sdata->u.mesh.tx_cache;
	struct ieee80211_mesh_fast_tx_key key = {};
	struct ieee80211_mesh_fast_tx *entry;
	int i;

	ether_addr_copy(key.addr, addr);
	spin_lock_bh(&cache->walk_lock);
	for (i = 0; i < NUM_MESH_FAST_TX_TYPE; i++) {
		key.type = i;
		entry = rhashtable_lookup_fast(&cache->rht, &key, fast_tx_rht_params);
		if (entry)
			mesh_fast_tx_entry_free(cache, entry);
	}
	spin_unlock_bh(&cache->walk_lock);
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return ERR_PTR(-ENOMEM);

	tbl = &sdata->u.mesh.mesh_paths;
	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_get_insert_fast(&tbl->rhead,
						  &new_mpath->rhash,
						  mesh_rht_params);
	if (!mpath)
		hlist_add_head(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (mpath) {
		kfree(new_mpath);

		if (IS_ERR(mpath))
			return mpath;

		new_mpath = mpath;
	}

	sdata->u.mesh.mesh_paths_generation++;
	return new_mpath;
}

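/*
 * Typical TX-path usage (illustrative sketch, cf. mesh_nexthop_resolve()):
 *
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (!mpath)
 *		mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath)) {
 *		mesh_path_discard_frame(sdata, skb);
 *		return PTR_ERR(mpath);
 *	}
 */
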
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct mesh_table *tbl;
	struct mesh_path *new_mpath;
	int ret;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		return -ENOMEM;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	tbl = &sdata->u.mesh.mpp_paths;

	spin_lock_bh(&tbl->walk_lock);
	ret = rhashtable_lookup_insert_fast(&tbl->rhead,
					    &new_mpath->rhash,
					    mesh_rht_params);
	if (!ret)
		hlist_add_head_rcu(&new_mpath->walk_list, &tbl->walk_head);
	spin_unlock_bh(&tbl->walk_lock);

	if (ret)
		kfree(new_mpath);
	else
		mesh_fast_tx_flush_addr(sdata, dst);

	sdata->u.mesh.mpp_paths_generation++;
	return ret;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;

	rcu_read_lock();
	hlist_for_each_entry_rcu(mpath, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_free_rcu(struct mesh_table *tbl,
			       struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING | MESH_PATH_DELETED;
	mesh_gate_del(tbl, mpath);
	spin_unlock_bh(&mpath->state_lock);
	timer_shutdown_sync(&mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
	mesh_path_flush_pending(mpath);
	kfree_rcu(mpath, rcu);
}

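/* caller must hold tbl->walk_lock */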
static void __mesh_path_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	hlist_del_rcu(&mpath->walk_list);
	rhashtable_remove_fast(&tbl->rhead, &mpath->rhash, mesh_rht_params);
	if (tbl == &mpath->sdata->u.mesh.mpp_paths)
		mesh_fast_tx_flush_addr(mpath->sdata, mpath->dst);
	else
		mesh_fast_tx_flush_mpath(mpath);
	mesh_path_free_rcu(tbl, mpath);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl = &sdata->u.mesh.mesh_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (rcu_access_pointer(mpath->next_hop) == sta)
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl = &sdata->u.mesh.mpp_paths;
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if (ether_addr_equal(mpath->mpp, proxy))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

static void table_flush_by_iface(struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal (proxy) paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	table_flush_by_iface(&sdata->u.mesh.mesh_paths);
	table_flush_by_iface(&sdata->u.mesh.mpp_paths);
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table *tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_path *mpath;

	spin_lock_bh(&tbl->walk_lock);
	mpath = rhashtable_lookup_fast(&tbl->rhead, addr, mesh_rht_params);
	if (!mpath) {
		spin_unlock_bh(&tbl->walk_lock);
		return -ENXIO;
	}

	__mesh_path_del(tbl, mpath);
	spin_unlock_bh(&tbl->walk_lock);
	return 0;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	err = table_path_del(&sdata->u.mesh.mesh_paths, sdata, addr);
	sdata->u.mesh.mesh_paths_generation++;
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames are
 * copied from each gate to the next.  After the frames are copied, the gate
 * queues are emptied onto the transmission queue.
 *
 * Returns: 0 on success, -EHOSTUNREACH if no active gate was found
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mesh_path *gate;
	bool copy = false;

	tbl = &sdata->u.mesh.mesh_paths;

	rcu_read_lock();
	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		if (gate->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->dst);
			mesh_path_move_to_queue(gate, from_mpath, copy);
			from_mpath = gate;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding to %pM (flags %#x)\n",
				  gate->dst, gate->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, &tbl->known_gates, gate_list) {
		mpath_dbg(sdata, "Sending to %pM\n", gate->dst);
		mesh_path_tx_pending(gate);
	}
	rcu_read_unlock();

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	ieee80211_free_txskb(&sdata->local->hw, skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq, *tmp;
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	list_for_each_entry_safe(preq, tmp, &ifmsh->preq_queue.list, list) {
		if (ether_addr_equal(mpath->dst, preq->dst)) {
			list_del(&preq->list);
			kfree(preq);
			--ifmsh->preq_queue_len;
		}
	}
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: mpath->state_lock is taken internally, so it must NOT be held by
 * the caller.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags = MESH_PATH_FIXED | MESH_PATH_SN_VALID;
	mesh_path_activate(mpath);
	mesh_fast_tx_flush_mpath(mpath);
	spin_unlock_bh(&mpath->state_lock);
	ewma_mesh_fail_avg_init(&next_hop->mesh->fail_avg);
	/* init it at a low value - 0 start is tricky */
	ewma_mesh_fail_avg_add(&next_hop->mesh->fail_avg, 1);
	mesh_path_tx_pending(mpath);
}

void mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
{
	mesh_table_init(&sdata->u.mesh.mesh_paths);
	mesh_table_init(&sdata->u.mesh.mpp_paths);
	mesh_fast_tx_init(sdata);
}

static
void mesh_path_tbl_expire(struct ieee80211_sub_if_data *sdata,
			  struct mesh_table *tbl)
{
	struct mesh_path *mpath;
	struct hlist_node *n;

	spin_lock_bh(&tbl->walk_lock);
	hlist_for_each_entry_safe(mpath, n, &tbl->walk_head, walk_list) {
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			__mesh_path_del(tbl, mpath);
	}
	spin_unlock_bh(&tbl->walk_lock);
}

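/* Called periodically (from the mesh housekeeping work, at the time of
 * writing) to age out expired, non-fixed path entries.
 */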
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mesh_paths);
	mesh_path_tbl_expire(sdata, &sdata->u.mesh.mpp_paths);
}

void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
{
	mesh_fast_tx_deinit(sdata);
	mesh_table_free(&sdata->u.mesh.mesh_paths);
	mesh_table_free(&sdata->u.mesh.mpp_paths);
}