1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2008-2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
3
4 /* Kernel module implementing an IP set type: the list:set type */
5
6 #include <linux/module.h>
7 #include <linux/ip.h>
8 #include <linux/rculist.h>
9 #include <linux/skbuff.h>
10 #include <linux/errno.h>
11
12 #include <linux/netfilter/ipset/ip_set.h>
13 #include <linux/netfilter/ipset/ip_set_list.h>
14
15 #define IPSET_TYPE_REV_MIN 0
16 /* 1 Counters support added */
17 /* 2 Comments support added */
18 #define IPSET_TYPE_REV_MAX 3 /* skbinfo support added */
19
20 MODULE_LICENSE("GPL");
21 MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@netfilter.org>");
22 IP_SET_MODULE_DESC("list:set", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX);
23 MODULE_ALIAS("ip_set_list:set");
24
25 /* Member elements */
/* Member elements: one node per member set in the ordered list */
struct set_elem {
	struct rcu_head rcu;	/* for call_rcu()-deferred freeing */
	struct list_head list;	/* node in list_set::members (RCU list) */
	struct ip_set *set; /* Sigh, in order to cleanup reference */
	ip_set_id_t id;		/* index of the member set */
} __aligned(__alignof__(u64));
32
/* Parsed userspace request element for add/del/test */
struct set_adt_elem {
	ip_set_id_t id;		/* set to add/del/test */
	ip_set_id_t refid;	/* before/after reference set, or IPSET_INVALID_ID */
	int before;		/* > 0: before refid, < 0: after refid, 0: none */
};
38
39 /* Type structure */
40 struct list_set {
41 u32 size; /* size of set list array */
42 struct timer_list gc; /* garbage collection */
43 struct ip_set *set; /* attached to this ip_set */
44 struct net *net; /* namespace */
45 struct list_head members; /* the set members */
46 };
47
/* Kernel-side TEST: try each member set in order until one matches.
 * Called under rcu_read_lock() (taken in list_set_kadt).
 * Returns 1 on match, 0 otherwise.
 */
static int
list_set_ktest(struct ip_set *set, const struct sk_buff *skb,
	       const struct xt_action_param *par,
	       struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct ip_set_ext *mext = &opt->ext;
	struct set_elem *e;
	/* Save the original cmdflags: the extension matching below must see
	 * them even though we strip flags from opt for the sub-set lookups.
	 */
	u32 flags = opt->cmdflags;
	int ret;

	/* Don't lookup sub-counters at all */
	opt->cmdflags &= ~IPSET_FLAG_MATCH_COUNTERS;
	if (opt->cmdflags & IPSET_FLAG_SKIP_SUBCOUNTER_UPDATE)
		opt->cmdflags |= IPSET_FLAG_SKIP_COUNTER_UPDATE;
	list_for_each_entry_rcu(e, &map->members, list) {
		ret = ip_set_test(e->id, skb, par, opt);
		if (ret <= 0)
			continue;
		/* Member set matched; check this element's own extensions
		 * (timeout, counters, ...) against the saved flags.
		 */
		if (ip_set_match_extensions(set, ext, mext, flags, e))
			return 1;
	}
	return 0;
}
72
/* Kernel-side ADD: add the packet to the first live (non-expired) member
 * set that accepts it; the walk stops at the first success.
 * Called under rcu_read_lock() (taken in list_set_kadt).
 * Always returns 0, whether or not any member set accepted the entry.
 */
static int
list_set_kadd(struct ip_set *set, const struct sk_buff *skb,
	      const struct xt_action_param *par,
	      struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	int ret;

	list_for_each_entry_rcu(e, &map->members, list) {
		/* Skip members whose own timeout has expired */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		ret = ip_set_add(e->id, skb, par, opt);
		if (ret == 0)
			return ret;
	}
	return 0;
}
92
/* Kernel-side DEL: delete the packet from the first live member set that
 * holds it; the walk stops at the first success.
 * Called under rcu_read_lock() (taken in list_set_kadt).
 * Always returns 0, whether or not any member set held the entry.
 */
static int
list_set_kdel(struct ip_set *set, const struct sk_buff *skb,
	      const struct xt_action_param *par,
	      struct ip_set_adt_opt *opt, const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	int ret;

	list_for_each_entry_rcu(e, &map->members, list) {
		/* Skip members whose own timeout has expired */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		ret = ip_set_del(e->id, skb, par, opt);
		if (ret == 0)
			return ret;
	}
	return 0;
}
112
113 static int
list_set_kadt(struct ip_set * set,const struct sk_buff * skb,const struct xt_action_param * par,enum ipset_adt adt,struct ip_set_adt_opt * opt)114 list_set_kadt(struct ip_set *set, const struct sk_buff *skb,
115 const struct xt_action_param *par,
116 enum ipset_adt adt, struct ip_set_adt_opt *opt)
117 {
118 struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
119 int ret = -EINVAL;
120
121 rcu_read_lock();
122 switch (adt) {
123 case IPSET_TEST:
124 ret = list_set_ktest(set, skb, par, opt, &ext);
125 break;
126 case IPSET_ADD:
127 ret = list_set_kadd(set, skb, par, opt, &ext);
128 break;
129 case IPSET_DEL:
130 ret = list_set_kdel(set, skb, par, opt, &ext);
131 break;
132 default:
133 break;
134 }
135 rcu_read_unlock();
136
137 return ret;
138 }
139
140 /* Userspace interfaces: we are protected by the nfnl mutex */
141
142 static void
__list_set_del_rcu(struct rcu_head * rcu)143 __list_set_del_rcu(struct rcu_head * rcu)
144 {
145 struct set_elem *e = container_of(rcu, struct set_elem, rcu);
146 struct ip_set *set = e->set;
147
148 ip_set_ext_destroy(set, e);
149 kfree(e);
150 }
151
/* Remove an element from the list: unlink it for new readers, drop the
 * reference held on the member set, and defer the actual free until after
 * an RCU grace period (existing readers may still traverse the element).
 */
static void
list_set_del(struct ip_set *set, struct set_elem *e)
{
	struct list_set *map = set->data;

	set->elements--;
	list_del_rcu(&e->list);
	ip_set_put_byindex(map->net, e->id);
	call_rcu(&e->rcu, __list_set_del_rcu);
}
162
/* Replace element @old with @e in place (same list position): swap the
 * nodes RCU-safely, drop the reference held on @old's member set, and
 * free @old after a grace period. set->elements is unchanged since one
 * element substitutes for another.
 */
static void
list_set_replace(struct ip_set *set, struct set_elem *e, struct set_elem *old)
{
	struct list_set *map = set->data;

	list_replace_rcu(&old->list, &e->list);
	ip_set_put_byindex(map->net, old->id);
	call_rcu(&old->rcu, __list_set_del_rcu);
}
172
173 static void
set_cleanup_entries(struct ip_set * set)174 set_cleanup_entries(struct ip_set *set)
175 {
176 struct list_set *map = set->data;
177 struct set_elem *e, *n;
178
179 list_for_each_entry_safe(e, n, &map->members, list)
180 if (ip_set_timeout_expired(ext_timeout(e, set)))
181 list_set_del(set, e);
182 }
183
/* Userspace TEST: is set d->id a member, optionally at a given position?
 * d->before == 0: plain membership test.
 * d->before  > 0: member AND immediately followed by set d->refid.
 * d->before  < 0: member AND immediately preceded by set d->refid.
 * Returns 1 on match, 0 otherwise.
 */
static int
list_set_utest(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	       struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *next, *prev = NULL;
	int ret = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &map->members, list) {
		/* Expired elements are invisible: they don't count as
		 * members and don't become "prev" either.
		 */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			prev = e;
			continue;
		}

		if (d->before == 0) {
			ret = 1;
			goto out;
		} else if (d->before > 0) {
			/* next may point at the list head here; it is only
			 * dereferenced when e is not the last entry (the &&
			 * short-circuits).
			 */
			next = list_next_entry(e, list);
			ret = !list_is_last(&e->list, &map->members) &&
			      next->id == d->refid;
		} else {
			ret = prev && prev->id == d->refid;
		}
		goto out;
	}
out:
	rcu_read_unlock();
	return ret;
}
219
/* Fill in the element's extension area (counters, comment, skbinfo,
 * timeout) from the userspace-supplied values.
 * The timeout is written last, per the original ordering comment —
 * presumably so a concurrent reader checking expiry sees the other
 * extensions already initialized (NOTE(review): confirm against readers).
 */
static void
list_set_init_extensions(struct ip_set *set, const struct ip_set_ext *ext,
			 struct set_elem *e)
{
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(e, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(set, ext_comment(e, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
	/* Update timeout last */
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
}
234
/* Userspace ADD: insert set d->id into the list, optionally positioned
 * before (d->before > 0) or after (d->before < 0) the reference set
 * d->refid. The caller (list_set_uadt) already holds a reference on
 * d->id; on success that reference is either kept by the stored element
 * or, for a re-add of an existing element, dropped here.
 */
static int
list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *n, *prev, *next;
	bool flag_exist = flags & IPSET_FLAG_EXIST;

	/* Find where to add the new entry:
	 * n    - existing element with the same id (if any)
	 * next - the reference element when inserting before it
	 * prev - the reference element when inserting after it
	 */
	n = prev = next = NULL;
	list_for_each_entry_rcu(e, &map->members, list) {
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (d->id == e->id)
			n = e;
		else if (d->before == 0 || e->id != d->refid)
			continue;
		else if (d->before > 0)
			next = e;
		else
			prev = e;
	}

	/* If before/after is used on an empty set */
	if ((d->before > 0 && !next) ||
	    (d->before < 0 && !prev))
		return -IPSET_ERR_REF_EXIST;

	/* Re-add already existing element */
	if (n) {
		if (!flag_exist)
			return -IPSET_ERR_EXIST;
		/* Update extensions */
		ip_set_ext_destroy(set, n);
		list_set_init_extensions(set, ext, n);

		/* Set is already added to the list */
		ip_set_put_byindex(map->net, d->id);
		return 0;
	}
	/* Add new entry: pick the neighbor slot that the new element may
	 * replace (reusing n, now NULL, as the replacement candidate).
	 */
	if (d->before == 0) {
		/* Append */
		n = list_empty(&map->members) ? NULL :
		    list_last_entry(&map->members, struct set_elem, list);
	} else if (d->before > 0) {
		/* Insert after next element */
		if (!list_is_last(&next->list, &map->members))
			n = list_next_entry(next, list);
	} else {
		/* Insert before prev element */
		if (prev->list.prev != &map->members)
			n = list_prev_entry(prev, list);
	}
	/* Can we replace a timed out entry? Only keep n as the replacement
	 * target when it is actually expired.
	 */
	if (n &&
	    !(SET_WITH_TIMEOUT(set) &&
	      ip_set_timeout_expired(ext_timeout(n, set))))
		n = NULL;

	e = kzalloc(set->dsize, GFP_ATOMIC);
	if (!e)
		return -ENOMEM;
	e->id = d->id;
	e->set = set;
	INIT_LIST_HEAD(&e->list);
	list_set_init_extensions(set, ext, e);
	if (n)
		list_set_replace(set, e, n);
	else if (next)
		list_add_tail_rcu(&e->list, &next->list);
	else if (prev)
		list_add_rcu(&e->list, &prev->list);
	else
		list_add_tail_rcu(&e->list, &map->members);
	set->elements++;

	return 0;
}
316
/* Userspace DEL: remove set d->id from the list; when d->before is
 * non-zero the element is removed only if it sits in the requested
 * position relative to the reference set d->refid.
 */
static int
list_set_udel(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e, *n, *next, *prev = NULL;

	list_for_each_entry_safe(e, n, &map->members, list) {
		/* Expired elements are invisible and don't become "prev" */
		if (SET_WITH_TIMEOUT(set) &&
		    ip_set_timeout_expired(ext_timeout(e, set)))
			continue;
		else if (e->id != d->id) {
			prev = e;
			continue;
		}

		if (d->before > 0) {
			/* next->id only read when e is not last (|| above) */
			next = list_next_entry(e, list);
			if (list_is_last(&e->list, &map->members) ||
			    next->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		} else if (d->before < 0) {
			if (!prev || prev->id != d->refid)
				return -IPSET_ERR_REF_EXIST;
		}
		list_set_del(set, e);
		return 0;
	}
	/* Not found: a positional request fails with REF_EXIST, a plain
	 * delete with EXIST.
	 */
	return d->before != 0 ? -IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST;
}
348
/* Userspace entry point for add/del/test: parse the netlink attributes,
 * resolve the named sets (taking references), run GC, and dispatch to
 * list_set_uadd/udel/utest.
 *
 * Reference lifecycle: ip_set_get_byname() grabs a reference on both
 * e.id and e.refid. The refid reference is always released at "finish";
 * the id reference is released unless a successful ADD stored it in the
 * new element (which then owns it until list_set_del).
 */
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct list_set *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	struct ip_set *s;
	int ret = 0;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_NAME] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;
	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
	if (e.id == IPSET_INVALID_ID)
		return -IPSET_ERR_NAME;
	/* "Loop detection": a list:set may not contain another set that can
	 * itself refer to sets by name.
	 */
	if (s->type->features & IPSET_TYPE_NAME) {
		ret = -IPSET_ERR_LOOP;
		goto finish;
	}

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);

		e.before = f & IPSET_FLAG_BEFORE;
	}

	/* "before" without a reference set name is a protocol error */
	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
		ret = -IPSET_ERR_BEFORE;
		goto finish;
	}

	if (tb[IPSET_ATTR_NAMEREF]) {
		e.refid = ip_set_get_byname(map->net,
					    nla_data(tb[IPSET_ATTR_NAMEREF]),
					    &s);
		if (e.refid == IPSET_INVALID_ID) {
			ret = -IPSET_ERR_NAMEREF;
			goto finish;
		}
		/* A reference without BEFORE means "after" */
		if (!e.before)
			e.before = -1;
	}
	/* Purge expired entries before modifying the list */
	if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
		set_cleanup_entries(set);

	ret = adtfn(set, &e, &ext, &ext, flags);

finish:
	if (e.refid != IPSET_INVALID_ID)
		ip_set_put_byindex(map->net, e.refid);
	/* Keep the id reference only when a successful ADD stored it */
	if (adt != IPSET_ADD || ret)
		ip_set_put_byindex(map->net, e.id);

	return ip_set_eexist(ret, flags) ? 0 : ret;
}
414
/* Remove every member element (dropping the references they hold) and
 * reset the element/extension accounting.
 */
static void
list_set_flush(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e, *n;

	list_for_each_entry_safe(e, n, &map->members, list)
		list_set_del(set, e);
	set->elements = 0;
	set->ext_size = 0;
}
426
427 static void
list_set_destroy(struct ip_set * set)428 list_set_destroy(struct ip_set *set)
429 {
430 struct list_set *map = set->data;
431
432 WARN_ON_ONCE(!list_empty(&map->members));
433 kfree(map);
434
435 set->data = NULL;
436 }
437
438 /* Calculate the actual memory size of the set data */
439 static size_t
list_set_memsize(const struct list_set * map,size_t dsize)440 list_set_memsize(const struct list_set *map, size_t dsize)
441 {
442 struct set_elem *e;
443 u32 n = 0;
444
445 rcu_read_lock();
446 list_for_each_entry_rcu(e, &map->members, list)
447 n++;
448 rcu_read_unlock();
449
450 return (sizeof(*map) + n * dsize);
451 }
452
453 static int
list_set_head(struct ip_set * set,struct sk_buff * skb)454 list_set_head(struct ip_set *set, struct sk_buff *skb)
455 {
456 const struct list_set *map = set->data;
457 struct nlattr *nested;
458 size_t memsize = list_set_memsize(map, set->dsize) + set->ext_size;
459
460 nested = nla_nest_start(skb, IPSET_ATTR_DATA);
461 if (!nested)
462 goto nla_put_failure;
463 if (nla_put_net32(skb, IPSET_ATTR_SIZE, htonl(map->size)) ||
464 nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
465 nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
466 nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
467 goto nla_put_failure;
468 if (unlikely(ip_set_put_flags(skb, set)))
469 goto nla_put_failure;
470 nla_nest_end(skb, nested);
471
472 return 0;
473 nla_put_failure:
474 return -EMSGSIZE;
475 }
476
/* Dump the member list as nested netlink DATA attributes. Supports
 * resumed dumps: cb->args[IPSET_CB_ARG0] holds the index of the first
 * element still to be sent; 0 means the listing is complete.
 */
static int
list_set_list(const struct ip_set *set,
	      struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct list_set *map = set->data;
	struct nlattr *atd, *nested;
	u32 i = 0, first = cb->args[IPSET_CB_ARG0];
	char name[IPSET_MAXNAMELEN];
	struct set_elem *e;
	int ret = 0;

	atd = nla_nest_start(skb, IPSET_ATTR_ADT);
	if (!atd)
		return -EMSGSIZE;

	rcu_read_lock();
	list_for_each_entry_rcu(e, &map->members, list) {
		/* Skip both already-dumped and expired elements, but keep
		 * counting so resume indices stay consistent.
		 */
		if (i < first ||
		    (SET_WITH_TIMEOUT(set) &&
		     ip_set_timeout_expired(ext_timeout(e, set)))) {
			i++;
			continue;
		}
		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
		if (!nested)
			goto nla_put_failure;
		ip_set_name_byindex(map->net, e->id, name);
		if (nla_put_string(skb, IPSET_ATTR_NAME, name))
			goto nla_put_failure;
		if (ip_set_put_extensions(skb, set, e, true))
			goto nla_put_failure;
		nla_nest_end(skb, nested);
		i++;
	}

	nla_nest_end(skb, atd);
	/* Set listing finished */
	cb->args[IPSET_CB_ARG0] = 0;
	goto out;

nla_put_failure:
	nla_nest_cancel(skb, nested);
	if (unlikely(i == first)) {
		/* Not even one element fitted into this skb: give up,
		 * a resume could not make progress either.
		 */
		nla_nest_cancel(skb, atd);
		cb->args[IPSET_CB_ARG0] = 0;
		ret = -EMSGSIZE;
	} else {
		/* Partial dump: remember where to resume */
		cb->args[IPSET_CB_ARG0] = i;
		nla_nest_end(skb, atd);
	}
out:
	rcu_read_unlock();
	return ret;
}
531
532 static bool
list_set_same_set(const struct ip_set * a,const struct ip_set * b)533 list_set_same_set(const struct ip_set *a, const struct ip_set *b)
534 {
535 const struct list_set *x = a->data;
536 const struct list_set *y = b->data;
537
538 return x->size == y->size &&
539 a->timeout == b->timeout &&
540 a->extensions == b->extensions;
541 }
542
/* Stop the garbage-collection timer (shutdown, so it cannot be re-armed)
 * and then empty the list; ordering matters, as a still-running GC could
 * otherwise race with the flush.
 */
static void
list_set_cancel_gc(struct ip_set *set)
{
	struct list_set *map = set->data;

	if (SET_WITH_TIMEOUT(set))
		timer_shutdown_sync(&map->gc);

	/* Flush list to drop references to other ipsets */
	list_set_flush(set);
}
554
/* Operations table dispatched by the generic ip_set core for this type */
static const struct ip_set_type_variant set_variant = {
	.kadt = list_set_kadt,
	.uadt = list_set_uadt,
	.adt = {
		[IPSET_ADD] = list_set_uadd,
		[IPSET_DEL] = list_set_udel,
		[IPSET_TEST] = list_set_utest,
	},
	.destroy = list_set_destroy,
	.flush = list_set_flush,
	.head = list_set_head,
	.list = list_set_list,
	.same_set = list_set_same_set,
	.cancel_gc = list_set_cancel_gc,
};
570
571 static void
list_set_gc(struct timer_list * t)572 list_set_gc(struct timer_list *t)
573 {
574 struct list_set *map = from_timer(map, t, gc);
575 struct ip_set *set = map->set;
576
577 spin_lock_bh(&set->lock);
578 set_cleanup_entries(set);
579 spin_unlock_bh(&set->lock);
580
581 map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
582 add_timer(&map->gc);
583 }
584
585 static void
list_set_gc_init(struct ip_set * set,void (* gc)(struct timer_list * t))586 list_set_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
587 {
588 struct list_set *map = set->data;
589
590 timer_setup(&map->gc, gc, 0);
591 mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
592 }
593
594 /* Create list:set type of sets */
595
596 static bool
init_list_set(struct net * net,struct ip_set * set,u32 size)597 init_list_set(struct net *net, struct ip_set *set, u32 size)
598 {
599 struct list_set *map;
600
601 map = kzalloc(sizeof(*map), GFP_KERNEL);
602 if (!map)
603 return false;
604
605 map->size = size;
606 map->net = net;
607 map->set = set;
608 INIT_LIST_HEAD(&map->members);
609 set->data = map;
610
611 return true;
612 }
613
/* Separate lockdep class for the set lock — presumably because list:set
 * operations take member set locks and would otherwise look recursive to
 * lockdep (NOTE(review): confirm the original motivation).
 */
static struct lock_class_key list_set_lockdep_key;

/* Create a new list:set: validate attributes, compute the per-element
 * size (base element plus enabled extensions), allocate the private data
 * and start GC when a timeout is configured.
 */
static int
list_set_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		u32 flags)
{
	u32 size = IP_SET_LIST_DEFAULT_SIZE;

	if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_SIZE])
		size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]);
	/* Enforce the minimum configured size */
	if (size < IP_SET_LIST_MIN_SIZE)
		size = IP_SET_LIST_MIN_SIZE;

	lockdep_set_class(&set->lock, &list_set_lockdep_key);
	set->variant = &set_variant;
	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
				     __alignof__(struct set_elem));
	if (!init_list_set(net, set, size))
		return -ENOMEM;
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		list_set_gc_init(set, list_set_gc);
	}
	return 0;
}
644
/* Type registration descriptor: create/adt netlink attribute policies
 * and supported revision range of the list:set type.
 */
static struct ip_set_type list_set_type __read_mostly = {
	.name = "list:set",
	.protocol = IPSET_PROTOCOL,
	.features = IPSET_TYPE_NAME | IPSET_DUMP_LAST,
	.dimension = IPSET_DIM_ONE,
	.family = NFPROTO_UNSPEC,
	.revision_min = IPSET_TYPE_REV_MIN,
	.revision_max = IPSET_TYPE_REV_MAX,
	.create = list_set_create,
	.create_policy = {
		[IPSET_ATTR_SIZE] = { .type = NLA_U32 },
		[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
	},
	.adt_policy = {
		[IPSET_ATTR_NAME] = { .type = NLA_STRING,
				      .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_NAMEREF] = { .type = NLA_STRING,
					 .len = IPSET_MAXNAMELEN },
		[IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 },
		[IPSET_ATTR_LINENO] = { .type = NLA_U32 },
		[IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 },
		[IPSET_ATTR_BYTES] = { .type = NLA_U64 },
		[IPSET_ATTR_PACKETS] = { .type = NLA_U64 },
		[IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING,
					 .len = IPSET_MAX_COMMENT_SIZE },
		[IPSET_ATTR_SKBMARK] = { .type = NLA_U64 },
		[IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 },
		[IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 },
	},
	.me = THIS_MODULE,
};
677
/* Module init: register the list:set type with the ip_set core */
static int __init
list_set_init(void)
{
	return ip_set_type_register(&list_set_type);
}
683
/* Module exit: wait for all pending call_rcu() element-free callbacks
 * (queued by list_set_del) before the module text goes away, then
 * unregister the type.
 */
static void __exit
list_set_fini(void)
{
	rcu_barrier();
	ip_set_type_unregister(&list_set_type);
}

module_init(list_set_init);
module_exit(list_set_fini);
693