#ifndef _NF_FLOW_TABLE_H
#define _NF_FLOW_TABLE_H

#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/flow_offload.h>
#include <net/dst.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>

struct nf_flowtable;
struct nf_flow_rule;
struct flow_offload;
enum flow_offload_tuple_dir;

struct nf_flow_key {
	struct flow_dissector_key_meta		meta;
	struct flow_dissector_key_control	control;
	struct flow_dissector_key_control	enc_control;
	struct flow_dissector_key_basic		basic;
	struct flow_dissector_key_vlan		vlan;
	struct flow_dissector_key_vlan		cvlan;
	union {
		struct flow_dissector_key_ipv4_addrs	ipv4;
		struct flow_dissector_key_ipv6_addrs	ipv6;
	};
	struct flow_dissector_key_keyid		enc_key_id;
	union {
		struct flow_dissector_key_ipv4_addrs	enc_ipv4;
		struct flow_dissector_key_ipv6_addrs	enc_ipv6;
	};
	struct flow_dissector_key_tcp		tcp;
	struct flow_dissector_key_ports		tp;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */

struct nf_flow_match {
	struct flow_dissector	dissector;
	struct nf_flow_key	key;
	struct nf_flow_key	mask;
};

struct nf_flow_rule {
	struct nf_flow_match	match;
	struct flow_rule	*rule;
};
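
/*
 * nf_flow_match describes the packets that belong to an offloaded flow:
 * "key" holds the dissected header fields, "mask" selects which bits of
 * the key are relevant, and "dissector" tells drivers how the key is laid
 * out. nf_flow_rule bundles such a match with the flow_rule that carries
 * the actions (NAT, redirect, counters, ...) handed to the hardware
 * offload path.
 */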

struct nf_flowtable_type {
	struct list_head		list;
	int				family;
	int				(*init)(struct nf_flowtable *ft);
	bool				(*gc)(const struct flow_offload *flow);
	int				(*setup)(struct nf_flowtable *ft,
						 struct net_device *dev,
						 enum flow_block_command cmd);
	int				(*action)(struct net *net,
						  struct flow_offload *flow,
						  enum flow_offload_tuple_dir dir,
						  struct nf_flow_rule *flow_rule);
	void				(*free)(struct nf_flowtable *ft);
	void				(*get)(struct nf_flowtable *ft);
	void				(*put)(struct nf_flowtable *ft);
	nf_hookfn			*hook;
	struct module			*owner;
};
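
/*
 * Each L3 family provides a struct nf_flowtable_type and registers it with
 * the flowtable core. Illustrative sketch only, loosely modelled on the
 * IPv4 flowtable type (the registration helper lives in nf_tables, not in
 * this header):
 *
 *	static struct nf_flowtable_type flowtable_ipv4 = {
 *		.family	= NFPROTO_IPV4,
 *		.init	= nf_flow_table_init,
 *		.setup	= nf_flow_table_offload_setup,
 *		.action	= nf_flow_rule_route_ipv4,
 *		.free	= nf_flow_table_free,
 *		.hook	= nf_flow_offload_ip_hook,
 *		.owner	= THIS_MODULE,
 *	};
 */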

enum nf_flowtable_flags {
	NF_FLOWTABLE_HW_OFFLOAD		= 0x1,	/* NFT_FLOWTABLE_HW_OFFLOAD */
	NF_FLOWTABLE_COUNTER		= 0x2,	/* NFT_FLOWTABLE_COUNTER */
};

struct nf_flowtable {
	struct list_head		list;
	struct rhashtable		rhashtable;
	int				priority;
	const struct nf_flowtable_type	*type;
	struct delayed_work		gc_work;
	unsigned int			flags;
	struct flow_block		flow_block;
	struct rw_semaphore		flow_block_lock; /* Guards flow_block */
	possible_net_t			net;
};

static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
{
	return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
}

enum flow_offload_tuple_dir {
	FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
	FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
};
#define FLOW_OFFLOAD_DIR_MAX	IP_CT_DIR_MAX

enum flow_offload_xmit_type {
	FLOW_OFFLOAD_XMIT_UNSPEC	= 0,
	FLOW_OFFLOAD_XMIT_NEIGH,
	FLOW_OFFLOAD_XMIT_XFRM,
	FLOW_OFFLOAD_XMIT_DIRECT,
	FLOW_OFFLOAD_XMIT_TC,
};
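
/*
 * xmit_type selects how the software fast path forwards a matching packet:
 * NEIGH follows the cached route and resolves the next hop through the
 * neighbour table, XFRM hands the packet back to the IPsec output path,
 * DIRECT transmits straight to a cached interface and MAC address pair,
 * and TC is used by the act_ct offload path.
 */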

#define NF_FLOW_TABLE_ENCAP_MAX		2

struct flow_offload_tuple {
	union {
		struct in_addr		src_v4;
		struct in6_addr		src_v6;
	};
	union {
		struct in_addr		dst_v4;
		struct in6_addr		dst_v6;
	};
	struct {
		__be16			src_port;
		__be16			dst_port;
	};

	int				iifidx;

	u8				l3proto;
	u8				l4proto;
	struct {
		u16			id;
		__be16			proto;
	} encap[NF_FLOW_TABLE_ENCAP_MAX];

	/* All members above are keys for lookups, see flow_offload_hash(). */
	struct { }			__hash;

	u8				dir:2,
					xmit_type:3,
					encap_num:2,
					in_vlan_ingress:2;
	u16				mtu;
	union {
		struct {
			struct dst_entry *dst_cache;
			u32		dst_cookie;
		};
		struct {
			u32		ifidx;
			u32		hw_ifidx;
			u8		h_source[ETH_ALEN];
			u8		h_dest[ETH_ALEN];
		} out;
		struct {
			u32		iifidx;
		} tc;
	};
};
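
/*
 * Everything up to the empty "__hash" marker is the lookup key; the
 * remaining fields are per-direction state (cached route or output device,
 * MTU, transmit method). A software lookup fills in only the key fields
 * and calls flow_offload_lookup(). Minimal sketch for a TCP-over-IPv4
 * packet (illustrative only, error handling omitted):
 *
 *	struct flow_offload_tuple tuple = {};
 *
 *	tuple.src_v4.s_addr	= iph->saddr;
 *	tuple.dst_v4.s_addr	= iph->daddr;
 *	tuple.src_port		= tcph->source;
 *	tuple.dst_port		= tcph->dest;
 *	tuple.l3proto		= AF_INET;
 *	tuple.l4proto		= IPPROTO_TCP;
 *	tuple.iifidx		= skb->dev->ifindex;
 *
 *	tuplehash = flow_offload_lookup(flow_table, &tuple);
 */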

struct flow_offload_tuple_rhash {
	struct rhash_head		node;
	struct flow_offload_tuple	tuple;
};

enum nf_flow_flags {
	NF_FLOW_SNAT,
	NF_FLOW_DNAT,
	NF_FLOW_TEARDOWN,
	NF_FLOW_HW,
	NF_FLOW_HW_DYING,
	NF_FLOW_HW_DEAD,
	NF_FLOW_HW_PENDING,
	NF_FLOW_HW_BIDIRECTIONAL,
	NF_FLOW_HW_ESTABLISHED,
};

enum flow_offload_type {
	NF_FLOW_OFFLOAD_UNSPEC	= 0,
	NF_FLOW_OFFLOAD_ROUTE,
};

struct flow_offload {
	struct flow_offload_tuple_rhash	tuplehash[FLOW_OFFLOAD_DIR_MAX];
	struct nf_conn			*ct;
	unsigned long			flags;
	u16				type;
	u32				timeout;
	struct rcu_head			rcu_head;
};
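
/*
 * A flow_offload entry covers both directions of a connection: the
 * ORIGINAL and REPLY tuples are inserted separately into the rhashtable,
 * and container_of() on the matching tuplehash recovers the flow. "ct"
 * holds a reference on the backing conntrack entry for the lifetime of
 * the offload.
 */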

#define NF_FLOW_TIMEOUT (30 * HZ)
#define nf_flowtable_time_stamp	(u32)jiffies

unsigned long flow_offload_get_timeout(struct flow_offload *flow);

static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
{
	return (__s32)(timeout - nf_flowtable_time_stamp);
}
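
/*
 * flow->timeout stores an absolute timestamp in jiffies; the delta is
 * negative once that point lies in the past. Worked example with HZ=100
 * and the default NF_FLOW_TIMEOUT of 30 * HZ: a flow last refreshed 35
 * seconds ago carries timeout = (now - 3500) + 3000 = now - 500, so
 * nf_flow_timeout_delta() returns -500 and the garbage collector is free
 * to tear the flow down.
 */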

struct nf_flow_route {
	struct {
		struct dst_entry	*dst;
		struct {
			u32		ifindex;
			struct {
				u16	id;
				__be16	proto;
			} encap[NF_FLOW_TABLE_ENCAP_MAX];
			u8		num_encaps:2,
					ingress_vlans:2;
		} in;
		struct {
			u32		ifindex;
			u32		hw_ifindex;
			u8		h_source[ETH_ALEN];
			u8		h_dest[ETH_ALEN];
		} out;
		enum flow_offload_xmit_type xmit_type;
	} tuple[FLOW_OFFLOAD_DIR_MAX];
};
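
/*
 * The usual way to create an offload entry is to fill one nf_flow_route
 * per direction and hand it to a flow allocated from the conntrack entry.
 * Minimal sketch, loosely following the nft flow_offload expression
 * (illustrative only, error handling omitted):
 *
 *	struct nf_flow_route route = {};
 *	struct flow_offload *flow;
 *
 *	route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst = dst_orig;
 *	route.tuple[FLOW_OFFLOAD_DIR_ORIGINAL].xmit_type = FLOW_OFFLOAD_XMIT_NEIGH;
 *	route.tuple[FLOW_OFFLOAD_DIR_REPLY].dst = dst_reply;
 *	route.tuple[FLOW_OFFLOAD_DIR_REPLY].xmit_type = FLOW_OFFLOAD_XMIT_NEIGH;
 *
 *	flow = flow_offload_alloc(ct);
 *	flow_offload_route_init(flow, &route);
 *	flow_offload_add(flow_table, flow);
 */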

struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);

static inline int
nf_flow_table_offload_add_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;
	int err = 0;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		err = -EEXIST;
		goto unlock;
	}

	block_cb = flow_block_cb_alloc(cb, cb_priv, cb_priv, NULL);
	if (IS_ERR(block_cb)) {
		err = PTR_ERR(block_cb);
		goto unlock;
	}

	list_add_tail(&block_cb->list, &block->cb_list);
	up_write(&flow_table->flow_block_lock);

	if (flow_table->type->get)
		flow_table->type->get(flow_table);
	return 0;

unlock:
	up_write(&flow_table->flow_block_lock);
	return err;
}
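
/*
 * Drivers that implement flowtable offload attach a flow_setup_cb_t to the
 * table's flow_block through this helper; -EEXIST is returned if the same
 * callback/private data pair is already registered, and a matching
 * nf_flow_table_offload_del_cb() call must undo the registration. The
 * optional type->get()/type->put() hooks let the flowtable backend hold a
 * reference for as long as a callback is attached.
 */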

static inline void
nf_flow_table_offload_del_cb(struct nf_flowtable *flow_table,
			     flow_setup_cb_t *cb, void *cb_priv)
{
	struct flow_block *block = &flow_table->flow_block;
	struct flow_block_cb *block_cb;

	down_write(&flow_table->flow_block_lock);
	block_cb = flow_block_cb_lookup(block, cb, cb_priv);
	if (block_cb) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	} else {
		WARN_ON(true);
	}
	up_write(&flow_table->flow_block_lock);

	if (flow_table->type->put)
		flow_table->type->put(flow_table);
}

void flow_offload_route_init(struct flow_offload *flow,
			     struct nf_flow_route *route);

int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
void flow_offload_refresh(struct nf_flowtable *flow_table,
			  struct flow_offload *flow, bool force);

struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
						     struct flow_offload_tuple *tuple);
void nf_flow_table_gc_run(struct nf_flowtable *flow_table);
void nf_flow_table_gc_cleanup(struct nf_flowtable *flowtable,
			      struct net_device *dev);
void nf_flow_table_cleanup(struct net_device *dev);

int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);

void flow_offload_teardown(struct flow_offload *flow);

void nf_flow_snat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);
void nf_flow_dnat_port(const struct flow_offload *flow,
		       struct sk_buff *skb, unsigned int thoff,
		       u8 protocol, enum flow_offload_tuple_dir dir);

struct flow_ports {
	__be16 source, dest;
};
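
/*
 * flow_ports mirrors the first four bytes of the TCP/UDP header, so the
 * NAT helpers can rewrite ports generically. Typical (illustrative) use:
 *
 *	struct flow_ports *ports;
 *
 *	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
 *	... match or rewrite ports->source / ports->dest ...
 */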

unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
				     const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state);

#define MODULE_ALIAS_NF_FLOWTABLE(family)	\
	MODULE_ALIAS("nf-flowtable-" __stringify(family))
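
/*
 * Declares the module alias used to autoload the flowtable backend for a
 * given L3 family, e.g. MODULE_ALIAS_NF_FLOWTABLE(AF_INET) expands to
 * MODULE_ALIAS("nf-flowtable-2") because the family constant is resolved
 * before __stringify() turns it into a string.
 */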

void nf_flow_offload_add(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_del(struct nf_flowtable *flowtable,
			 struct flow_offload *flow);
void nf_flow_offload_stats(struct nf_flowtable *flowtable,
			   struct flow_offload *flow);

void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
void nf_flow_table_offload_flush_cleanup(struct nf_flowtable *flowtable);

int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
				struct net_device *dev,
				enum flow_block_command cmd);
int nf_flow_rule_route_ipv4(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);
int nf_flow_rule_route_ipv6(struct net *net, struct flow_offload *flow,
			    enum flow_offload_tuple_dir dir,
			    struct nf_flow_rule *flow_rule);

int nf_flow_table_offload_init(void);
void nf_flow_table_offload_exit(void);

static inline __be16 __nf_flow_pppoe_proto(const struct sk_buff *skb)
{
	__be16 proto;

	proto = *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			     sizeof(struct pppoe_hdr)));
	switch (proto) {
	case htons(PPP_IP):
		return htons(ETH_P_IP);
	case htons(PPP_IPV6):
		return htons(ETH_P_IPV6);
	}

	return 0;
}
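
/*
 * The PPPoE session header sits directly behind the Ethernet header, and
 * the 16-bit PPP protocol field follows it:
 *
 *	[ Ethernet (ETH_HLEN) ][ pppoe_hdr ][ PPP proto ][ inner packet ]
 *
 * __nf_flow_pppoe_proto() reads that field and maps PPP_IP/PPP_IPV6 to the
 * corresponding Ethernet protocol, returning 0 for anything else.
 */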

static inline bool nf_flow_pppoe_proto(struct sk_buff *skb, __be16 *inner_proto)
{
	if (!pskb_may_pull(skb, PPPOE_SES_HLEN))
		return false;

	*inner_proto = __nf_flow_pppoe_proto(skb);

	return true;
}

#define NF_FLOW_TABLE_STAT_INC(net, count) __this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC(net, count) __this_cpu_dec((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_INC_ATOMIC(net, count)	\
	this_cpu_inc((net)->ft.stat->count)
#define NF_FLOW_TABLE_STAT_DEC_ATOMIC(net, count)	\
	this_cpu_dec((net)->ft.stat->count)
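
/*
 * The plain INC/DEC variants use __this_cpu_*() and may only be called
 * from contexts where preemption is already disabled (e.g. the packet
 * path); the _ATOMIC variants use this_cpu_*(), which is also safe from
 * preemptible context such as workqueue callbacks.
 */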

#ifdef CONFIG_NF_FLOW_TABLE_PROCFS
int nf_flow_table_init_proc(struct net *net);
void nf_flow_table_fini_proc(struct net *net);
#else
static inline int nf_flow_table_init_proc(struct net *net)
{
	return 0;
}

static inline void nf_flow_table_fini_proc(struct net *net)
{
}
#endif /* CONFIG_NF_FLOW_TABLE_PROCFS */

#endif /* _NF_FLOW_TABLE_H */