1 /* Structure dynamic extension infrastructure
2  * Copyright (C) 2004 Rusty Russell IBM Corporation
3  * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
4  * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
5  *
6  *      This program is free software; you can redistribute it and/or
7  *      modify it under the terms of the GNU General Public License
8  *      as published by the Free Software Foundation; either version
9  *      2 of the License, or (at your option) any later version.
10  */
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/mutex.h>
14 #include <linux/rcupdate.h>
15 #include <linux/slab.h>
16 #include <linux/skbuff.h>
17 #include <net/netfilter/nf_conntrack_extend.h>
18 
19 static struct nf_ct_ext_type *nf_ct_ext_types[NF_CT_EXT_NUM];
20 static DEFINE_MUTEX(nf_ct_ext_type_mutex);
21 
/* Release all extension data attached to @ct.
 *
 * Invoked when a conntrack entry is destroyed: for every extension id
 * present in the entry, call the registered type's ->destroy() hook (if
 * any) so the extension can release its private state.
 */
void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!nf_ct_ext_exist(ct, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);

		/* Here the nf_ct_ext_type might already have been
		 * unregistered (t == NULL).  That is fine: an extension
		 * type is responsible for cleaning up its private area
		 * in all conntracks when it unregisters, so there is
		 * nothing left for us to do in that case.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);
44 
45 static void *
46 nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id, gfp_t gfp)
47 {
48 	unsigned int off, len;
49 	struct nf_ct_ext_type *t;
50 
51 	rcu_read_lock();
52 	t = rcu_dereference(nf_ct_ext_types[id]);
53 	BUG_ON(t == NULL);
54 	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
55 	len = off + t->len;
56 	rcu_read_unlock();
57 
58 	*ext = kzalloc(t->alloc_size, gfp);
59 	if (!*ext)
60 		return NULL;
61 
62 	(*ext)->offset[id] = off;
63 	(*ext)->len = len;
64 
65 	return (void *)(*ext) + off;
66 }
67 
68 static void __nf_ct_ext_free_rcu(struct rcu_head *head)
69 {
70 	struct nf_ct_ext *ext = container_of(head, struct nf_ct_ext, rcu);
71 	kfree(ext);
72 }
73 
/* Add extension @id to conntrack @ct, growing the existing extension
 * area if the conntrack already has one.
 *
 * Returns a pointer to the zeroed private area for @id, or NULL if the
 * extension is already present or allocation fails.
 */
void *__nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp)
{
	struct nf_ct_ext *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	if (!ct->ext)
		return nf_ct_ext_create(&ct->ext, id, gfp);

	if (nf_ct_ext_exist(ct, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	/* Append the new extension at the (aligned) end of the current
	 * area; only the plain values computed here outlive the RCU
	 * read-side critical section, never the t pointer itself.
	 */
	newoff = ALIGN(ct->ext->len, t->align);
	newlen = newoff + t->len;
	rcu_read_unlock();

	/* __krealloc does not free the old block — required here, since
	 * the old area may still be referenced and is only released via
	 * call_rcu() below after it has actually been replaced.
	 */
	new = __krealloc(ct->ext, newlen, gfp);
	if (!new)
		return NULL;

	if (new != ct->ext) {
		/* The area moved: let every existing extension relocate
		 * its private data before the old block goes away.
		 */
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!nf_ct_ext_exist(ct, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)ct->ext + ct->ext->offset[i]);
			rcu_read_unlock();
		}
		/* Free the old area after a grace period, then publish
		 * the new one.
		 */
		call_rcu(&ct->ext->rcu, __nf_ct_ext_free_rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add);
123 
124 static void update_alloc_size(struct nf_ct_ext_type *type)
125 {
126 	int i, j;
127 	struct nf_ct_ext_type *t1, *t2;
128 	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;
129 
130 	/* unnecessary to update all types */
131 	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
132 		min = type->id;
133 		max = type->id;
134 	}
135 
136 	/* This assumes that extended areas in conntrack for the types
137 	   whose NF_CT_EXT_F_PREALLOC bit set are allocated in order */
138 	for (i = min; i <= max; i++) {
139 		t1 = nf_ct_ext_types[i];
140 		if (!t1)
141 			continue;
142 
143 		t1->alloc_size = sizeof(struct nf_ct_ext)
144 				 + ALIGN(sizeof(struct nf_ct_ext), t1->align)
145 				 + t1->len;
146 		for (j = 0; j < NF_CT_EXT_NUM; j++) {
147 			t2 = nf_ct_ext_types[j];
148 			if (t2 == NULL || t2 == t1 ||
149 			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
150 				continue;
151 
152 			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
153 					 + t2->len;
154 		}
155 	}
156 }
157 
158 /* This MUST be called in process context. */
159 int nf_ct_extend_register(struct nf_ct_ext_type *type)
160 {
161 	int ret = 0;
162 
163 	mutex_lock(&nf_ct_ext_type_mutex);
164 	if (nf_ct_ext_types[type->id]) {
165 		ret = -EBUSY;
166 		goto out;
167 	}
168 
169 	/* This ensures that nf_ct_ext_create() can allocate enough area
170 	   before updating alloc_size */
171 	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
172 			   + type->len;
173 	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
174 	update_alloc_size(type);
175 out:
176 	mutex_unlock(&nf_ct_ext_type_mutex);
177 	return ret;
178 }
179 EXPORT_SYMBOL_GPL(nf_ct_extend_register);
180 
/* Unregister a conntrack extension type.  MUST be called in process
 * context (takes a mutex; rcu_barrier() may block).
 *
 * Clears the type's slot and recomputes alloc_size for the remaining
 * types, then waits for all pending call_rcu() frees of old extension
 * areas to complete before returning to the caller (which may be about
 * to unload the owning module).
 */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	rcu_assign_pointer(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
191