/*
 * Copyright (c) 2008 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "ieee80211_i.h"
#include "mesh.h"

/* There will initially be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2

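/*
 * A path counts as expired when it is marked active, its expiry time has
 * passed and it is not a fixed path. The lookup functions re-check this
 * under mpath->state_lock before clearing MESH_PATH_ACTIVE.
 */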
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
				time_after(jiffies, mpath->exp_time) && \
				!(mpath->flags & MESH_PATH_FIXED))

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table *mesh_paths;

/* The table-grow function takes this lock as a writer, and functions that
 * add or delete nodes take it as readers. Plain reads of the table (i.e.
 * lookups) are already well protected by RCU.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	rcu_assign_pointer(mpath->next_hop, sta);
}
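
/*
 * Illustrative call pattern (cf. mesh_path_fix_nexthop() below, a real
 * in-tree caller):
 *
 *	spin_lock_bh(&mpath->state_lock);
 *	mesh_path_assign_nexthop(mpath, sta);
 *	spin_unlock_bh(&mpath->state_lock);
 */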


/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @dev: local interface
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an rcu read lock section.
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mesh_table *tbl;
	struct mpath_node *node;

	tbl = rcu_dereference(mesh_paths);

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, dev, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(dst, mpath->dst, ETH_ALEN) == 0) {
			if (MPATH_EXPIRED(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				if (MPATH_EXPIRED(mpath))
					mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
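
/*
 * Illustrative sketch (not an in-tree caller; dst, dev and sta are the
 * caller's): lookups must run inside an RCU read-side section, and the
 * returned mpath must not be dereferenced after rcu_read_unlock():
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, dev);
 *	if (mpath)
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */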

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index of the path, counted over the entries that match @dev
 * @dev: local interface, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an rcu read lock section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct net_device *dev)
{
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (dev && node->mpath->dev != dev)
			continue;
		if (j++ == idx) {
			if (MPATH_EXPIRED(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				if (MPATH_EXPIRED(node->mpath))
					node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (memcmp(dst, dev->dev_addr, ETH_ALEN) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.sta.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_KERNEL);
	if (!new_mpath) {
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}
	new_node = kmalloc(sizeof(struct mpath_node), GFP_KERNEL);
	if (!new_node) {
		kfree(new_mpath);
		atomic_dec(&sdata->u.sta.mpaths);
		err = -ENOMEM;
		goto endadd2;
	}

	read_lock(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->dev = dev;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hash_idx = mesh_table_hash(dst, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);

	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev && memcmp(dst, mpath->dst, ETH_ALEN)
				== 0) {
			err = -EEXIST;
			atomic_dec(&sdata->u.sta.mpaths);
			kfree(new_node);
			kfree(new_mpath);
			goto endadd;
		}
	}

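	/* Publish the new node, then note whether the table should grow to
	 * keep the mean chain length below MEAN_CHAIN_LEN.
	 */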
	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&mesh_paths->entries) >=
		mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1))
		grow = 1;

endadd:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
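	/*
	 * Grow under the resize lock held for writing: copy the nodes into a
	 * larger table, publish it with rcu_assign_pointer(), and free the
	 * old table (but not the mesh_path leafs it shares with the new one)
	 * only after a grace period has elapsed.
	 */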
	if (!err && grow) {
		struct mesh_table *oldtbl, *newtbl;

		write_lock(&pathtbl_resize_lock);
		oldtbl = mesh_paths;
		newtbl = mesh_table_grow(mesh_paths);
		if (!newtbl) {
			write_unlock(&pathtbl_resize_lock);
			return -ENOMEM;
		}
		rcu_assign_pointer(mesh_paths, newtbl);
		write_unlock(&pathtbl_resize_lock);

		synchronize_rcu();
		mesh_table_free(oldtbl, false);
	}
endadd2:
	return err;
}
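
/*
 * Illustrative sketch (loosely modelled on the HWMP path selection code,
 * not a verbatim in-tree caller): create a path on demand, then look it up
 * under RCU. Note that mesh_path_add() may sleep, so the rcu read section
 * is dropped around it:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(dst, dev);
 *	if (!mpath) {
 *		rcu_read_unlock();
 *		if (mesh_path_add(dst, dev) != 0)
 *			return;
 *		rcu_read_lock();
 *		mpath = mesh_path_lookup(dst, dev);
 *	}
 *	rcu_read_unlock();
 */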


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct net_device *dev = sta->sdata->dev;
	int i;

	rcu_read_lock();
	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if (mpath->next_hop == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->dsn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(mpath->dst,
					cpu_to_le32(mpath->dsn),
					dev->broadcast, dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(mesh_plink_broken);

/**
 * mesh_path_flush_by_nexthop - deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB is the only state that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this), so any reader in an rcu read block is
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->next_hop == sta)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}

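/**
 * mesh_path_flush - delete all mesh paths going through a given interface
 *
 * @dev: local interface whose mesh paths are removed
 */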
void mesh_path_flush(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	for_each_mesh_entry(mesh_paths, p, node, i) {
		mpath = node->mpath;
		if (mpath->dev == dev)
			mesh_path_del(mpath->dst, mpath->dev);
	}
}

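/*
 * RCU callback for mesh_path_del(): runs after a grace period has elapsed,
 * once no RCU reader can still hold a reference to the unlinked node.
 */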
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata =
		IEEE80211_DEV_TO_SUB_IF(node->mpath->dev);

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.sta.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @dev: local interface
 *
 * Returns: 0 if successful
 */
int mesh_path_del(u8 *addr, struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock(&pathtbl_resize_lock);
	hash_idx = mesh_table_hash(addr, dev, mesh_paths);
	bucket = &mesh_paths->hash_buckets[hash_idx];

	spin_lock(&mesh_paths->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->dev == dev &&
				memcmp(addr, mpath->dst, ETH_ALEN) == 0) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags |= MESH_PATH_RESOLVING;
			hlist_del_rcu(&node->list);
			call_rcu(&node->rcu, mesh_path_node_reclaim);
			atomic_dec(&mesh_paths->entries);
			spin_unlock_bh(&mpath->state_lock);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&mesh_paths->hashwlock[hash_idx]);
	read_unlock(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		dev_queue_xmit(skb);
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @dev: network device the frame was to be sent through
 *
 * If the frame was being forwarded from another MP, a PERR frame will be sent
 * to the precursor.
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
	struct mesh_path *mpath;
	u32 dsn = 0;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		struct ieee80211s_hdr *prev_meshhdr;
		int mshhdrlen;
		u8 *ra, *da;

		prev_meshhdr = ((struct ieee80211s_hdr *)skb->cb);
		mshhdrlen = ieee80211_get_mesh_hdrlen(prev_meshhdr);
		da = skb->data;
		ra = MESH_PREQ(skb);
		mpath = mesh_path_lookup(da, dev);
		if (mpath)
			dsn = ++mpath->dsn;
		mesh_path_error_tx(skb->data, cpu_to_le32(dsn), ra, dev);
	}

	kfree_skb(skb);
	sdata->u.sta.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata;
	struct sk_buff *skb;

	sdata = IEEE80211_DEV_TO_SUB_IF(mpath->dev);

	while ((skb = skb_dequeue(&mpath->frame_queue)) &&
			(mpath->flags & MESH_PATH_ACTIVE))
		mesh_path_discard_frame(skb, mpath->dev);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: mpath->state_lock must NOT be held when calling this function,
 * since it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->dsn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	synchronize_rcu();
	if (free_leafs)
		kfree(mpath);
	kfree(node);
}

static void mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	/* We run under pathtbl_resize_lock held for writing, so the
	 * allocation must not sleep; on failure the path is simply not
	 * carried over to the new table.
	 */
	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		return;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->dev, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
}

int mesh_pathtbl_init(void)
{
	mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!mesh_paths)
		return -ENOMEM;
	mesh_paths->free_node = &mesh_path_node_free;
	mesh_paths->copy_node = &mesh_path_node_copy;
	mesh_paths->mean_chain_len = MEAN_CHAIN_LEN;
	return 0;
}

void mesh_path_expire(struct net_device *dev)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	read_lock(&pathtbl_resize_lock);
	for_each_mesh_entry(mesh_paths, p, node, i) {
		if (node->mpath->dev != dev)
			continue;
		mpath = node->mpath;
		spin_lock_bh(&mpath->state_lock);
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) {
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_del(mpath->dst, mpath->dev);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}
	read_unlock(&pathtbl_resize_lock);
}

void mesh_pathtbl_unregister(void)
{
	mesh_table_free(mesh_paths, true);
}