/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (C) 2013 Jozsef Kadlecsik <kadlec@netfilter.org> */
3 
4 #ifndef __IP_SET_BITMAP_IP_GEN_H
5 #define __IP_SET_BITMAP_IP_GEN_H
6 
7 #define mtype_do_test		IPSET_TOKEN(MTYPE, _do_test)
8 #define mtype_gc_test		IPSET_TOKEN(MTYPE, _gc_test)
9 #define mtype_is_filled		IPSET_TOKEN(MTYPE, _is_filled)
10 #define mtype_do_add		IPSET_TOKEN(MTYPE, _do_add)
11 #define mtype_ext_cleanup	IPSET_TOKEN(MTYPE, _ext_cleanup)
12 #define mtype_do_del		IPSET_TOKEN(MTYPE, _do_del)
13 #define mtype_do_list		IPSET_TOKEN(MTYPE, _do_list)
14 #define mtype_do_head		IPSET_TOKEN(MTYPE, _do_head)
15 #define mtype_adt_elem		IPSET_TOKEN(MTYPE, _adt_elem)
16 #define mtype_add_timeout	IPSET_TOKEN(MTYPE, _add_timeout)
17 #define mtype_gc_init		IPSET_TOKEN(MTYPE, _gc_init)
18 #define mtype_kadt		IPSET_TOKEN(MTYPE, _kadt)
19 #define mtype_uadt		IPSET_TOKEN(MTYPE, _uadt)
20 #define mtype_destroy		IPSET_TOKEN(MTYPE, _destroy)
21 #define mtype_memsize		IPSET_TOKEN(MTYPE, _memsize)
22 #define mtype_flush		IPSET_TOKEN(MTYPE, _flush)
23 #define mtype_head		IPSET_TOKEN(MTYPE, _head)
24 #define mtype_same_set		IPSET_TOKEN(MTYPE, _same_set)
25 #define mtype_elem		IPSET_TOKEN(MTYPE, _elem)
26 #define mtype_test		IPSET_TOKEN(MTYPE, _test)
27 #define mtype_add		IPSET_TOKEN(MTYPE, _add)
28 #define mtype_del		IPSET_TOKEN(MTYPE, _del)
29 #define mtype_list		IPSET_TOKEN(MTYPE, _list)
30 #define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
31 #define mtype_cancel_gc		IPSET_TOKEN(MTYPE, _cancel_gc)
32 #define mtype			MTYPE
33 
34 #define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
35 
36 static void
mtype_gc_init(struct ip_set * set,void (* gc)(struct timer_list * t))37 mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
38 {
39 	struct mtype *map = set->data;
40 
41 	timer_setup(&map->gc, gc, 0);
42 	mod_timer(&map->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
43 }
44 
45 static void
mtype_ext_cleanup(struct ip_set * set)46 mtype_ext_cleanup(struct ip_set *set)
47 {
48 	struct mtype *map = set->data;
49 	u32 id;
50 
51 	for (id = 0; id < map->elements; id++)
52 		if (test_bit(id, map->members))
53 			ip_set_ext_destroy(set, get_ext(set, map, id));
54 }
55 
56 static void
mtype_destroy(struct ip_set * set)57 mtype_destroy(struct ip_set *set)
58 {
59 	struct mtype *map = set->data;
60 
61 	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
62 		mtype_ext_cleanup(set);
63 	ip_set_free(map->members);
64 	ip_set_free(map);
65 
66 	set->data = NULL;
67 }
68 
69 static void
mtype_flush(struct ip_set * set)70 mtype_flush(struct ip_set *set)
71 {
72 	struct mtype *map = set->data;
73 
74 	if (set->extensions & IPSET_EXT_DESTROY)
75 		mtype_ext_cleanup(set);
76 	bitmap_zero(map->members, map->elements);
77 	set->elements = 0;
78 	set->ext_size = 0;
79 }
80 
81 /* Calculate the actual memory size of the set data */
82 static size_t
mtype_memsize(const struct mtype * map,size_t dsize)83 mtype_memsize(const struct mtype *map, size_t dsize)
84 {
85 	return sizeof(*map) + map->memsize +
86 	       map->elements * dsize;
87 }
88 
89 static int
mtype_head(struct ip_set * set,struct sk_buff * skb)90 mtype_head(struct ip_set *set, struct sk_buff *skb)
91 {
92 	const struct mtype *map = set->data;
93 	struct nlattr *nested;
94 	size_t memsize = mtype_memsize(map, set->dsize) + set->ext_size;
95 
96 	nested = nla_nest_start(skb, IPSET_ATTR_DATA);
97 	if (!nested)
98 		goto nla_put_failure;
99 	if (mtype_do_head(skb, map) ||
100 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
101 	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
102 	    nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
103 		goto nla_put_failure;
104 	if (unlikely(ip_set_put_flags(skb, set)))
105 		goto nla_put_failure;
106 	nla_nest_end(skb, nested);
107 
108 	return 0;
109 nla_put_failure:
110 	return -EMSGSIZE;
111 }
112 
113 static int
mtype_test(struct ip_set * set,void * value,const struct ip_set_ext * ext,struct ip_set_ext * mext,u32 flags)114 mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
115 	   struct ip_set_ext *mext, u32 flags)
116 {
117 	struct mtype *map = set->data;
118 	const struct mtype_adt_elem *e = value;
119 	void *x = get_ext(set, map, e->id);
120 	int ret = mtype_do_test(e, map, set->dsize);
121 
122 	if (ret <= 0)
123 		return ret;
124 	return ip_set_match_extensions(set, ext, mext, flags, x);
125 }
126 
127 static int
mtype_add(struct ip_set * set,void * value,const struct ip_set_ext * ext,struct ip_set_ext * mext,u32 flags)128 mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
129 	  struct ip_set_ext *mext, u32 flags)
130 {
131 	struct mtype *map = set->data;
132 	const struct mtype_adt_elem *e = value;
133 	void *x = get_ext(set, map, e->id);
134 	int ret = mtype_do_add(e, map, flags, set->dsize);
135 
136 	if (ret == IPSET_ADD_FAILED) {
137 		if (SET_WITH_TIMEOUT(set) &&
138 		    ip_set_timeout_expired(ext_timeout(x, set))) {
139 			set->elements--;
140 			ret = 0;
141 		} else if (!(flags & IPSET_FLAG_EXIST)) {
142 			set_bit(e->id, map->members);
143 			return -IPSET_ERR_EXIST;
144 		}
145 		/* Element is re-added, cleanup extensions */
146 		ip_set_ext_destroy(set, x);
147 	}
148 	if (ret > 0)
149 		set->elements--;
150 
151 	if (SET_WITH_TIMEOUT(set))
152 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
153 		mtype_add_timeout(ext_timeout(x, set), e, ext, set, map, ret);
154 #else
155 		ip_set_timeout_set(ext_timeout(x, set), ext->timeout);
156 #endif
157 
158 	if (SET_WITH_COUNTER(set))
159 		ip_set_init_counter(ext_counter(x, set), ext);
160 	if (SET_WITH_COMMENT(set))
161 		ip_set_init_comment(set, ext_comment(x, set), ext);
162 	if (SET_WITH_SKBINFO(set))
163 		ip_set_init_skbinfo(ext_skbinfo(x, set), ext);
164 
165 	/* Activate element */
166 	set_bit(e->id, map->members);
167 	set->elements++;
168 
169 	return 0;
170 }
171 
172 static int
mtype_del(struct ip_set * set,void * value,const struct ip_set_ext * ext,struct ip_set_ext * mext,u32 flags)173 mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
174 	  struct ip_set_ext *mext, u32 flags)
175 {
176 	struct mtype *map = set->data;
177 	const struct mtype_adt_elem *e = value;
178 	void *x = get_ext(set, map, e->id);
179 
180 	if (mtype_do_del(e, map))
181 		return -IPSET_ERR_EXIST;
182 
183 	ip_set_ext_destroy(set, x);
184 	set->elements--;
185 	if (SET_WITH_TIMEOUT(set) &&
186 	    ip_set_timeout_expired(ext_timeout(x, set)))
187 		return -IPSET_ERR_EXIST;
188 
189 	return 0;
190 }
191 
192 #ifndef IP_SET_BITMAP_STORED_TIMEOUT
193 static bool
mtype_is_filled(const struct mtype_elem * x)194 mtype_is_filled(const struct mtype_elem *x)
195 {
196 	return true;
197 }
198 #endif
199 
200 static int
mtype_list(const struct ip_set * set,struct sk_buff * skb,struct netlink_callback * cb)201 mtype_list(const struct ip_set *set,
202 	   struct sk_buff *skb, struct netlink_callback *cb)
203 {
204 	struct mtype *map = set->data;
205 	struct nlattr *adt, *nested;
206 	void *x;
207 	u32 id, first = cb->args[IPSET_CB_ARG0];
208 	int ret = 0;
209 
210 	adt = nla_nest_start(skb, IPSET_ATTR_ADT);
211 	if (!adt)
212 		return -EMSGSIZE;
213 	/* Extensions may be replaced */
214 	rcu_read_lock();
215 	for (; cb->args[IPSET_CB_ARG0] < map->elements;
216 	     cb->args[IPSET_CB_ARG0]++) {
217 		cond_resched_rcu();
218 		id = cb->args[IPSET_CB_ARG0];
219 		x = get_ext(set, map, id);
220 		if (!test_bit(id, map->members) ||
221 		    (SET_WITH_TIMEOUT(set) &&
222 #ifdef IP_SET_BITMAP_STORED_TIMEOUT
223 		     mtype_is_filled(x) &&
224 #endif
225 		     ip_set_timeout_expired(ext_timeout(x, set))))
226 			continue;
227 		nested = nla_nest_start(skb, IPSET_ATTR_DATA);
228 		if (!nested) {
229 			if (id == first) {
230 				nla_nest_cancel(skb, adt);
231 				ret = -EMSGSIZE;
232 				goto out;
233 			}
234 
235 			goto nla_put_failure;
236 		}
237 		if (mtype_do_list(skb, map, id, set->dsize))
238 			goto nla_put_failure;
239 		if (ip_set_put_extensions(skb, set, x, mtype_is_filled(x)))
240 			goto nla_put_failure;
241 		nla_nest_end(skb, nested);
242 	}
243 	nla_nest_end(skb, adt);
244 
245 	/* Set listing finished */
246 	cb->args[IPSET_CB_ARG0] = 0;
247 
248 	goto out;
249 
250 nla_put_failure:
251 	nla_nest_cancel(skb, nested);
252 	if (unlikely(id == first)) {
253 		cb->args[IPSET_CB_ARG0] = 0;
254 		ret = -EMSGSIZE;
255 	}
256 	nla_nest_end(skb, adt);
257 out:
258 	rcu_read_unlock();
259 	return ret;
260 }
261 
262 static void
mtype_gc(struct timer_list * t)263 mtype_gc(struct timer_list *t)
264 {
265 	struct mtype *map = from_timer(map, t, gc);
266 	struct ip_set *set = map->set;
267 	void *x;
268 	u32 id;
269 
270 	/* We run parallel with other readers (test element)
271 	 * but adding/deleting new entries is locked out
272 	 */
273 	spin_lock_bh(&set->lock);
274 	for (id = 0; id < map->elements; id++)
275 		if (mtype_gc_test(id, map, set->dsize)) {
276 			x = get_ext(set, map, id);
277 			if (ip_set_timeout_expired(ext_timeout(x, set))) {
278 				clear_bit(id, map->members);
279 				ip_set_ext_destroy(set, x);
280 				set->elements--;
281 			}
282 		}
283 	spin_unlock_bh(&set->lock);
284 
285 	map->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
286 	add_timer(&map->gc);
287 }
288 
289 static void
mtype_cancel_gc(struct ip_set * set)290 mtype_cancel_gc(struct ip_set *set)
291 {
292 	struct mtype *map = set->data;
293 
294 	if (SET_WITH_TIMEOUT(set))
295 		del_timer_sync(&map->gc);
296 }
297 
298 static const struct ip_set_type_variant mtype = {
299 	.kadt	= mtype_kadt,
300 	.uadt	= mtype_uadt,
301 	.adt	= {
302 		[IPSET_ADD] = mtype_add,
303 		[IPSET_DEL] = mtype_del,
304 		[IPSET_TEST] = mtype_test,
305 	},
306 	.destroy = mtype_destroy,
307 	.flush	= mtype_flush,
308 	.head	= mtype_head,
309 	.list	= mtype_list,
310 	.same_set = mtype_same_set,
311 	.cancel_gc = mtype_cancel_gc,
312 };
313 
314 #endif /* __IP_SET_BITMAP_IP_GEN_H */
315