// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
 *
 * Development of this code funded by Astaro AG (http://www.astaro.com/)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/netlink.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>

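/*
 * Per-set private data: the rb-tree root, a read/write lock serializing
 * writers against locked readers, a seqcount that lets lockless readers
 * detect concurrent updates, and the delayed work used to garbage-collect
 * expired elements.
 */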
struct nft_rbtree {
	struct rb_root		root;
	rwlock_t		lock;
	seqcount_rwlock_t	count;
	struct delayed_work	gc_work;
};

struct nft_rbtree_elem {
	struct rb_node		node;
	struct nft_set_ext	ext;
};

static bool nft_rbtree_interval_end(const struct nft_rbtree_elem *rbe)
{
	return nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) &&
	       (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END);
}

static bool nft_rbtree_interval_start(const struct nft_rbtree_elem *rbe)
{
	return !nft_rbtree_interval_end(rbe);
}

static bool nft_rbtree_equal(const struct nft_set *set, const void *this,
			     const struct nft_rbtree_elem *interval)
{
	return memcmp(this, nft_set_ext_key(&interval->ext), set->klen) == 0;
}

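/*
 * Tree walk shared by the lockless and the locked lookup path. For
 * interval sets, the closest start element seen to the left of the key
 * is remembered in @interval, so a key falling inside an interval still
 * matches. Returns false on a seqcount retry, i.e. when a concurrent
 * writer may have rotated nodes under us; the caller then retries under
 * the read lock.
 */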
static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
				const u32 *key, const struct nft_set_ext **ext,
				unsigned int seq)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct nft_rbtree_elem *rbe, *interval = NULL;
	u8 genmask = nft_genmask_cur(net);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (interval &&
			    nft_rbtree_equal(set, this, interval) &&
			    nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(interval))
				continue;
			interval = rbe;
		} else if (d > 0)
			parent = rcu_dereference_raw(parent->rb_right);
		else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (nft_rbtree_interval_end(rbe)) {
				if (nft_set_is_anonymous(set))
					return false;
				parent = rcu_dereference_raw(parent->rb_left);
				interval = NULL;
				continue;
			}

			*ext = &rbe->ext;
			return true;
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    nft_rbtree_interval_start(interval)) {
		*ext = &interval->ext;
		return true;
	}

	return false;
}

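/*
 * Data path lookup: try locklessly first, and only fall back to the
 * read lock if the lockless walk raced with a writer.
 */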
INDIRECT_CALLABLE_SCOPE
bool nft_rbtree_lookup(const struct net *net, const struct nft_set *set,
		       const u32 *key, const struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	bool ret;

	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return ret;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_lookup(net, set, key, ext, seq);
	read_unlock_bh(&priv->lock);

	return ret;
}

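/*
 * Control plane counterpart of __nft_rbtree_lookup(): returns the
 * element itself and honours NFT_SET_ELEM_INTERVAL_END in @flags, so
 * either endpoint of an interval can be fetched.
 */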
static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
			     const u32 *key, struct nft_rbtree_elem **elem,
			     unsigned int seq, unsigned int flags, u8 genmask)
{
	struct nft_rbtree_elem *rbe, *interval = NULL;
	struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent;
	const void *this;
	int d;

	parent = rcu_dereference_raw(priv->root.rb_node);
	while (parent != NULL) {
		if (read_seqcount_retry(&priv->count, seq))
			return false;

		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		this = nft_set_ext_key(&rbe->ext);
		d = memcmp(this, key, set->klen);
		if (d < 0) {
			parent = rcu_dereference_raw(parent->rb_left);
			if (!(flags & NFT_SET_ELEM_INTERVAL_END))
				interval = rbe;
		} else if (d > 0) {
			parent = rcu_dereference_raw(parent->rb_right);
			if (flags & NFT_SET_ELEM_INTERVAL_END)
				interval = rbe;
		} else {
			if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = rcu_dereference_raw(parent->rb_left);
				continue;
			}

			if (nft_set_elem_expired(&rbe->ext))
				return false;

			if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
			    (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
			    (flags & NFT_SET_ELEM_INTERVAL_END)) {
				*elem = rbe;
				return true;
			}

			if (nft_rbtree_interval_end(rbe))
				interval = NULL;

			parent = rcu_dereference_raw(parent->rb_left);
		}
	}

	if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
	    nft_set_elem_active(&interval->ext, genmask) &&
	    !nft_set_elem_expired(&interval->ext) &&
	    ((!nft_rbtree_interval_end(interval) &&
	      !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
	     (nft_rbtree_interval_end(interval) &&
	      (flags & NFT_SET_ELEM_INTERVAL_END)))) {
		*elem = interval;
		return true;
	}

	return false;
}

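/*
 * ->get() backend: lockless attempt first, then a retry under the read
 * lock; returns ERR_PTR(-ENOENT) if no matching element is found.
 */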
static void *nft_rbtree_get(const struct net *net, const struct nft_set *set,
			    const struct nft_set_elem *elem, unsigned int flags)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	unsigned int seq = read_seqcount_begin(&priv->count);
	struct nft_rbtree_elem *rbe = ERR_PTR(-ENOENT);
	const u32 *key = (const u32 *)&elem->key.val;
	u8 genmask = nft_genmask_cur(net);
	bool ret;

	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (ret || !read_seqcount_retry(&priv->count, seq))
		return rbe;

	read_lock_bh(&priv->lock);
	seq = read_seqcount_begin(&priv->count);
	ret = __nft_rbtree_get(net, set, key, &rbe, seq, flags, genmask);
	if (!ret)
		rbe = ERR_PTR(-ENOENT);
	read_unlock_bh(&priv->lock);

	return rbe;
}

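/*
 * Insert @new into the tree, rejecting partial overlaps with -ENOTEMPTY
 * and exact duplicates with -EEXIST. The overlap detection scheme is
 * described in the block comment below. Runs with the write lock held,
 * inside the seqcount write section.
 */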
static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			       struct nft_rbtree_elem *new,
			       struct nft_set_ext **ext)
{
	bool overlap = false, dup_end_left = false, dup_end_right = false;
	struct nft_rbtree *priv = nft_set_priv(set);
	u8 genmask = nft_genmask_next(net);
	struct nft_rbtree_elem *rbe;
	struct rb_node *parent, **p;
	int d;

	/* Detect overlaps as we descend the tree. Set the flag in these cases:
	 *
	 * a1. _ _ __>|  ?_ _ __|  (insert end before existing end)
	 * a2. _ _ ___|  ?_ _ _>|  (insert end after existing end)
	 * a3. _ _ ___? >|_ _ __|  (insert start before existing end)
	 *
	 * and clear it later on, as we eventually reach the points indicated by
	 * '?' above, in the cases described below. We'll always meet these
	 * later, locally, due to tree ordering, and overlaps for the intervals
	 * that are the closest together are always evaluated last.
	 *
	 * b1. _ _ __>|  !_ _ __|  (insert end before existing start)
	 * b2. _ _ ___|  !_ _ _>|  (insert end after existing start)
	 * b3. _ _ ___! >|_ _ __|  (insert start after existing end, as a leaf)
	 *            '--' no nodes falling in this range
	 * b4.          >|_ _   !  (insert start before existing start)
	 *
	 * Case a3. resolves to b3.:
	 * - if the inserted start element is the leftmost, because the '0'
	 *   element in the tree serves as end element
	 * - otherwise, if an existing end is found immediately to the left. If
	 *   there are existing nodes in between, we need to further descend the
	 *   tree before we can conclude the new start isn't causing an overlap
	 *
	 * or to b4., which, preceded by a3., means we already traversed one or
	 * more existing intervals entirely, from the right.
	 *
	 * For a new, rightmost pair of elements, we'll hit cases b3. and b2.,
	 * in that order.
	 *
	 * The flag is also cleared in two special cases:
	 *
	 * b5. |__ _ _!|<_ _ _   (insert start right before existing end)
	 * b6. |__ _ >|!__ _ _   (insert end right after existing start)
	 *
	 * which always happen as the last step and imply that no further
	 * overlapping is possible.
	 *
	 * Another special case comes from the fact that start elements matching
	 * an already existing start element are allowed: insertion is not
	 * performed but we return -EEXIST in that case, and the error will be
	 * cleared by the caller if NLM_F_EXCL is not present in the request.
	 * This way, a request to insert an exact overlap isn't reported as an
	 * error to userspace if not desired.
	 *
	 * However, if the new start matches a pre-existing start, but the new
	 * end doesn't match the corresponding pre-existing end element, we
	 * need to report a partial overlap. This is a local condition that can
	 * be noticed without need for a tracking flag, by checking for a local
	 * duplicated end for a corresponding start, from left and right,
	 * separately.
	 */

	parent = NULL;
	p = &priv->root.rb_node;
	while (*p != NULL) {
		parent = *p;
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);
		d = memcmp(nft_set_ext_key(&rbe->ext),
			   nft_set_ext_key(&new->ext),
			   set->klen);
		if (d < 0) {
			p = &parent->rb_left;

			if (nft_rbtree_interval_start(new)) {
				if (nft_rbtree_interval_end(rbe) &&
				    nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext) && !*p)
					overlap = false;
			} else {
				if (dup_end_left && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_right = true;
					continue;
				}
			}
		} else if (d > 0) {
			p = &parent->rb_right;

			if (nft_rbtree_interval_end(new)) {
				if (dup_end_right && !*p)
					return -ENOTEMPTY;

				overlap = nft_rbtree_interval_end(rbe) &&
					  nft_set_elem_active(&rbe->ext,
							      genmask) &&
					  !nft_set_elem_expired(&rbe->ext);

				if (overlap) {
					dup_end_left = true;
					continue;
				}
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				overlap = nft_rbtree_interval_end(rbe);
			}
		} else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(new)) {
				p = &parent->rb_left;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(new)) {
				p = &parent->rb_right;

				if (nft_set_elem_active(&rbe->ext, genmask) &&
				    !nft_set_elem_expired(&rbe->ext))
					overlap = false;
			} else if (nft_set_elem_active(&rbe->ext, genmask) &&
				   !nft_set_elem_expired(&rbe->ext)) {
				*ext = &rbe->ext;
				return -EEXIST;
			} else {
				overlap = false;
				if (nft_rbtree_interval_end(rbe))
					p = &parent->rb_left;
				else
					p = &parent->rb_right;
			}
		}

		dup_end_left = dup_end_right = false;
	}

	if (overlap)
		return -ENOTEMPTY;

	rb_link_node_rcu(&new->node, parent, p);
	rb_insert_color(&new->node, &priv->root);
	return 0;
}

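/* ->insert() entry point: serializes against readers and other writers. */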
static int nft_rbtree_insert(const struct net *net, const struct nft_set *set,
			     const struct nft_set_elem *elem,
			     struct nft_set_ext **ext)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;
	int err;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	err = __nft_rbtree_insert(net, set, rbe, ext);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	return err;
}

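/*
 * Unlink an element from the tree under the writer locks. Freeing the
 * element is left to the caller, once no RCU reader can still see it.
 */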
static void nft_rbtree_remove(const struct net *net,
			      const struct nft_set *set,
			      const struct nft_set_elem *elem)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe = elem->priv;

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	rb_erase(&rbe->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);
}

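/*
 * Make an element active in the next generation and clear the busy bit
 * that may have been set while it was queued for deletion.
 */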
static void nft_rbtree_activate(const struct net *net,
				const struct nft_set *set,
				const struct nft_set_elem *elem)
{
	struct nft_rbtree_elem *rbe = elem->priv;

	nft_set_elem_change_active(net, set, &rbe->ext);
	nft_set_elem_clear_busy(&rbe->ext);
}

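/*
 * Deactivate an element for the next generation. The busy bit ensures
 * that concurrent flushes do not deactivate the same element twice;
 * returns true if this call performed the deactivation.
 */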
static bool nft_rbtree_flush(const struct net *net,
			     const struct nft_set *set, void *priv)
{
	struct nft_rbtree_elem *rbe = priv;

	if (!nft_set_elem_mark_busy(&rbe->ext) ||
	    !nft_is_active(net, &rbe->ext)) {
		nft_set_elem_change_active(net, set, &rbe->ext);
		return true;
	}
	return false;
}

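/*
 * Find the node matching @elem, telling interval start and end elements
 * with the same key apart, and deactivate it for the next generation.
 * Returns the element, or NULL if no active match exists.
 */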
static void *nft_rbtree_deactivate(const struct net *net,
				   const struct nft_set *set,
				   const struct nft_set_elem *elem)
{
	const struct nft_rbtree *priv = nft_set_priv(set);
	const struct rb_node *parent = priv->root.rb_node;
	struct nft_rbtree_elem *rbe, *this = elem->priv;
	u8 genmask = nft_genmask_next(net);
	int d;

	while (parent != NULL) {
		rbe = rb_entry(parent, struct nft_rbtree_elem, node);

		d = memcmp(nft_set_ext_key(&rbe->ext), &elem->key.val,
			   set->klen);
		if (d < 0)
			parent = parent->rb_left;
		else if (d > 0)
			parent = parent->rb_right;
		else {
			if (nft_rbtree_interval_end(rbe) &&
			    nft_rbtree_interval_start(this)) {
				parent = parent->rb_left;
				continue;
			} else if (nft_rbtree_interval_start(rbe) &&
				   nft_rbtree_interval_end(this)) {
				parent = parent->rb_right;
				continue;
			} else if (!nft_set_elem_active(&rbe->ext, genmask)) {
				parent = parent->rb_left;
				continue;
			}
			nft_rbtree_flush(net, set, rbe);
			return rbe;
		}
	}
	return NULL;
}

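/*
 * Iterate over all active, unexpired elements in key order and invoke
 * the iterator callback on each; used for dumps and set flushes.
 */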
static void nft_rbtree_walk(const struct nft_ctx *ctx,
			    struct nft_set *set,
			    struct nft_set_iter *iter)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct nft_set_elem elem;
	struct rb_node *node;

	read_lock_bh(&priv->lock);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (iter->count < iter->skip)
			goto cont;
		if (nft_set_elem_expired(&rbe->ext))
			goto cont;
		if (!nft_set_elem_active(&rbe->ext, iter->genmask))
			goto cont;

		elem.priv = rbe;

		iter->err = iter->fn(ctx, set, iter, &elem);
		if (iter->err < 0) {
			read_unlock_bh(&priv->lock);
			return;
		}
cont:
		iter->count++;
	}
	read_unlock_bh(&priv->lock);
}

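/*
 * Periodic garbage collection: walk the tree, batch up expired,
 * non-busy elements (start elements together with their matching end
 * elements) and erase them, then handle the catchall element and
 * re-arm the work item.
 */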
static void nft_rbtree_gc(struct work_struct *work)
{
	struct nft_rbtree_elem *rbe, *rbe_end = NULL, *rbe_prev = NULL;
	struct nft_set_gc_batch *gcb = NULL;
	struct nft_rbtree *priv;
	struct rb_node *node;
	struct nft_set *set;

	priv = container_of(work, struct nft_rbtree, gc_work.work);
	set  = nft_set_container_of(priv);

	write_lock_bh(&priv->lock);
	write_seqcount_begin(&priv->count);
	for (node = rb_first(&priv->root); node != NULL; node = rb_next(node)) {
		rbe = rb_entry(node, struct nft_rbtree_elem, node);

		if (nft_rbtree_interval_end(rbe)) {
			rbe_end = rbe;
			continue;
		}
		if (!nft_set_elem_expired(&rbe->ext))
			continue;
		if (nft_set_elem_mark_busy(&rbe->ext))
			continue;

		if (rbe_prev) {
			rb_erase(&rbe_prev->node, &priv->root);
			rbe_prev = NULL;
		}
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (!gcb)
			break;

		atomic_dec(&set->nelems);
		nft_set_gc_batch_add(gcb, rbe);
		rbe_prev = rbe;

		if (rbe_end) {
			atomic_dec(&set->nelems);
			nft_set_gc_batch_add(gcb, rbe_end);
			rb_erase(&rbe_end->node, &priv->root);
			rbe_end = NULL;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
	if (rbe_prev)
		rb_erase(&rbe_prev->node, &priv->root);
	write_seqcount_end(&priv->count);
	write_unlock_bh(&priv->lock);

	rbe = nft_set_catchall_gc(set);
	if (rbe) {
		gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
		if (gcb)
			nft_set_gc_batch_add(gcb, rbe);
	}
	nft_set_gc_batch_complete(gcb);

	queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
			   nft_set_gc_interval(set));
}

static u64 nft_rbtree_privsize(const struct nlattr * const nla[],
			       const struct nft_set_desc *desc)
{
	return sizeof(struct nft_rbtree);
}

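/* Set up the locks, the empty tree and, for timeout sets, the GC work. */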
static int nft_rbtree_init(const struct nft_set *set,
			   const struct nft_set_desc *desc,
			   const struct nlattr * const nla[])
{
	struct nft_rbtree *priv = nft_set_priv(set);

	rwlock_init(&priv->lock);
	seqcount_rwlock_init(&priv->count, &priv->lock);
	priv->root = RB_ROOT;

	INIT_DEFERRABLE_WORK(&priv->gc_work, nft_rbtree_gc);
	if (set->flags & NFT_SET_TIMEOUT)
		queue_delayed_work(system_power_efficient_wq, &priv->gc_work,
				   nft_set_gc_interval(set));

	return 0;
}

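/*
 * Tear down the set: stop GC, wait out pending RCU readers, then free
 * every remaining element.
 */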
static void nft_rbtree_destroy(const struct nft_set *set)
{
	struct nft_rbtree *priv = nft_set_priv(set);
	struct nft_rbtree_elem *rbe;
	struct rb_node *node;

	cancel_delayed_work_sync(&priv->gc_work);
	rcu_barrier();
	while ((node = priv->root.rb_node) != NULL) {
		rb_erase(node, &priv->root);
		rbe = rb_entry(node, struct nft_rbtree_elem, node);
		nft_set_elem_destroy(set, rbe, true);
	}
}

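/*
 * Size/performance estimate used for set backend selection:
 * O(log N) lookups, O(N) memory, single-field keys only.
 */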
static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
				struct nft_set_estimate *est)
{
	if (desc->field_count > 1)
		return false;

	if (desc->size)
		est->size = sizeof(struct nft_rbtree) +
			    desc->size * sizeof(struct nft_rbtree_elem);
	else
		est->size = ~0;

	est->lookup = NFT_SET_CLASS_O_LOG_N;
	est->space  = NFT_SET_CLASS_O_N;

	return true;
}

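/*
 * Backend registration. As an illustration (not taken from this file),
 * an interval set served by this backend could be created from
 * userspace roughly like:
 *
 *   nft add set inet filter ranges '{ type ipv4_addr; flags interval; }'
 *   nft add element inet filter ranges '{ 10.0.0.0-10.0.0.255 }'
 */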
const struct nft_set_type nft_set_rbtree_type = {
	.features	= NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
	.ops		= {
		.privsize	= nft_rbtree_privsize,
		.elemsize	= offsetof(struct nft_rbtree_elem, ext),
		.estimate	= nft_rbtree_estimate,
		.init		= nft_rbtree_init,
		.destroy	= nft_rbtree_destroy,
		.insert		= nft_rbtree_insert,
		.remove		= nft_rbtree_remove,
		.deactivate	= nft_rbtree_deactivate,
		.flush		= nft_rbtree_flush,
		.activate	= nft_rbtree_activate,
		.lookup		= nft_rbtree_lookup,
		.walk		= nft_rbtree_walk,
		.get		= nft_rbtree_get,
	},
};