/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
				       const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
					 const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
					  const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
	case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
	switch (attach_type) {
	CGROUP_ATYPE(CGROUP_INET_INGRESS);
	CGROUP_ATYPE(CGROUP_INET_EGRESS);
	CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
	CGROUP_ATYPE(CGROUP_SOCK_OPS);
	CGROUP_ATYPE(CGROUP_DEVICE);
	CGROUP_ATYPE(CGROUP_INET4_BIND);
	CGROUP_ATYPE(CGROUP_INET6_BIND);
	CGROUP_ATYPE(CGROUP_INET4_CONNECT);
	CGROUP_ATYPE(CGROUP_INET6_CONNECT);
	CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
	CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
	CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
	CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
	CGROUP_ATYPE(CGROUP_SYSCTL);
	CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
	CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
	CGROUP_ATYPE(CGROUP_GETSOCKOPT);
	CGROUP_ATYPE(CGROUP_SETSOCKOPT);
	CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
	CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
	CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
	default:
		return CGROUP_BPF_ATTACH_TYPE_INVALID;
	}
}
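
/*
 * Example (illustrative sketch, not part of this header's API): callers
 * typically translate the UAPI attach type and validate the result before
 * using it to index per-attach-type state, along these lines (the
 * surrounding context is made up):
 *
 *	enum cgroup_bpf_attach_type atype;
 *
 *	atype = to_cgroup_bpf_attach_type(attr->attach_type);
 *	if (atype == CGROUP_BPF_ATTACH_TYPE_INVALID)
 *		return -EINVAL;
 *	// atype can now safely index cgrp->bpf.effective[] and
 *	// cgroup_bpf_enabled_key[].
 */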

#undef CGROUP_ATYPE

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
	for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
	struct rcu_head rcu;
	char data[];
};

struct bpf_cgroup_storage {
	union {
		struct bpf_storage_buffer *buf;
		void __percpu *percpu_buf;
	};
	struct bpf_cgroup_storage_map *map;
	struct bpf_cgroup_storage_key key;
	struct list_head list_map;
	struct list_head list_cg;
	struct rb_node node;
	struct rcu_head rcu;
};

struct bpf_cgroup_link {
	struct bpf_link link;
	struct cgroup *cgroup;
	enum bpf_attach_type type;
};

struct bpf_prog_list {
	struct hlist_node node;
	struct bpf_prog *prog;
	struct bpf_cgroup_link *link;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
	u32 flags;
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      int *uaddrlen,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access,
				      enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
				       int *optname, sockptr_t optval,
				       int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, sockptr_t optval,
				       sockptr_t optlen, int max_optlen,
				       int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
	struct bpf_map *map)
{
	if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
		return BPF_CGROUP_STORAGE_PERCPU;

	return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
		      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
					enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
			     struct cgroup *cgroup,
			     enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
				     void *value, u64 flags);
/* Opportunistic check to see whether we have any BPF program attached
 * to this socket's cgroup for the given attach type, so callers can skip
 * setting up the run context when nothing is attached.
 */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
					   enum cgroup_bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_prog_array *array;

	array = rcu_access_pointer(cgrp->bpf.effective[type]);
	return array != &bpf_empty_prog_array.hdr;
}

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) &&			       \
	    cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS))		       \
		__ret = __cgroup_bpf_run_filter_skb(sk, skb,		       \
						    CGROUP_INET_INGRESS);      \
									       \
	__ret;								       \
})
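
/*
 * Example (illustrative sketch, not a mainline function): a receive-path
 * hook would run the ingress programs before queueing the skb and treat a
 * non-zero result as "drop", roughly:
 *
 *	static int example_sock_queue(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *
 *		if (err)
 *			return err;	// a program rejected the packet
 *		return __example_queue_skb(sk, skb);	// hypothetical helper
 *	}
 */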

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) {		       \
		typeof(sk) __sk = sk_to_full_sk(sk);			       \
		if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) &&	       \
		    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS))	       \
			__ret = __cgroup_bpf_run_filter_skb(__sk, skb,	       \
						    CGROUP_INET_EGRESS);       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SK_PROG(sk, atype)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		__ret = __cgroup_bpf_run_filter_sk(sk, atype);		       \
	}								       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk)			       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk)				       \
	BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype)		       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype))					       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, NULL);  \
	__ret;								       \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, t_ctx, NULL); \
		release_sock(sk);					       \
	}								       \
	__ret;								       \
})
/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via the upper bits of the return code. The only flag currently supported
 * (at bit position 0 of the flags) is BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE,
 * which indicates that the CAP_NET_BIND_SERVICE capability check should be
 * bypassed.
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({									       \
	u32 __flags = 0;						       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(atype)) {				       \
		lock_sock(sk);						       \
		__ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
							  atype, NULL, &__flags); \
		release_sock(sk);					       \
		if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE)	       \
			*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE;	       \
	}								       \
	__ret;								       \
})
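
/*
 * Example (illustrative sketch): in a cgroup/bind4 or cgroup/bind6 BPF
 * program, the verdict lives in bit 0 of the return value and the extra
 * flags in the bits above it (the prog-run helpers extract the flags as
 * ret >> 1). So "allow the bind and skip the CAP_NET_BIND_SERVICE check"
 * is encoded as (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE << 1) | 1, i.e. 3.
 * The program name below is made up for the example:
 *
 *	SEC("cgroup/bind4")
 *	int allow_privileged_port(struct bpf_sock_addr *ctx)
 *	{
 *		return 3;	// allow + bypass CAP_NET_BIND_SERVICE
 *	}
 */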

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk)				       \
	((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) ||			       \
	  cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) &&			       \
	 (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen)		       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen)		       \
	BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)      \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx)      \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen)	       \
	BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

/* The BPF_CGROUP_RUN_PROG_SOCK_OPS_SK() macro should be used when
 * sock_ops->sk is not a fullsock and its parent fullsock cannot be
 * reached via sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached via rsk_listener. In this case, the caller
 * holds the listener-sk (unlocked), sets sock_ops->sk to the req_sk, and
 * calls this "_SK" variant with the listener-sk so that the cgroup BPF
 * programs of the listener-sk will be run.
 *
 * Regardless of syncookie mode or not, calling bpf_setsockopt on the
 * listener-sk would not make sense anyway, so passing
 * 'sock_ops->sk == req_sk' to the BPF prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS))			       \
		__ret = __cgroup_bpf_run_filter_sock_ops(sk,		       \
							 sock_ops,	       \
							 CGROUP_SOCK_OPS);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops)				       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) {	       \
		struct sock *__sk = sk_to_full_sk((sock_ops)->sk);	       \
		if (__sk && sk_fullsock(__sk))				       \
			__ret = __cgroup_bpf_run_filter_sock_ops(__sk,	       \
								 sock_ops,     \
								 CGROUP_SOCK_OPS); \
	}								       \
	__ret;								       \
})
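
/*
 * Example (illustrative sketch): TCP callers typically populate a
 * bpf_sock_ops_kern on the stack and use the non-"_SK" variant, which
 * derives the fullsock itself. The surrounding context is made up;
 * BPF_SOCK_OPS_STATE_CB is just one example op:
 *
 *	struct bpf_sock_ops_kern sock_ops;
 *
 *	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
 *	sock_ops.op = BPF_SOCK_OPS_STATE_CB;
 *	sock_ops.sk = sk;
 *	(void)BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
 */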

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_DEVICE))				       \
		__ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
							  access,	       \
							  CGROUP_DEVICE);      \
									       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)	       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SYSCTL))				       \
		__ret = __cgroup_bpf_run_filter_sysctl(head, table, write,     \
						       buf, count, pos,	       \
						       CGROUP_SYSCTL);	       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen,  \
				       kernel_optval)			       \
({									       \
	int __ret = 0;							       \
	if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT))		       \
		__ret = __cgroup_bpf_run_filter_setsockopt(sock, level,	       \
							   optname, optval,    \
							   optlen,	       \
							   kernel_optval);     \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen,  \
				       max_optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) &&			       \
	    cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT))		       \
		if (!(sock)->sk_prot->bpf_bypass_getsockopt ||		       \
		    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
					  tcp_bpf_bypass_getsockopt,	       \
					  level, optname))		       \
			__ret = __cgroup_bpf_run_filter_getsockopt(	       \
				sock, level, optname, optval, optlen,	       \
				max_optlen, retval);			       \
	__ret;								       \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval,     \
					    optlen, retval)		       \
({									       \
	int __ret = retval;						       \
	if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT))			       \
		__ret = __cgroup_bpf_run_filter_getsockopt_kern(	       \
			sock, level, optname, optval, optlen, retval);	       \
	__ret;								       \
})
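
/*
 * Example (illustrative sketch): the getsockopt path runs the protocol
 * handler first, then gives cgroup BPF a chance to inspect or override
 * the result. The surrounding function and the handler name are made up:
 *
 *	int err = example_getsockopt_proto(sk, level, optname,	// hypothetical
 *					   optval, optlen);
 *
 *	err = BPF_CGROUP_RUN_PROG_GETSOCKOPT(sk, level, optname, optval,
 *					     optlen, max_optlen, err);
 *	return err;
 *
 * Note the wrapper is seeded with the handler's retval, so when no program
 * is attached (or TCP opts out via bpf_bypass_getsockopt) the original
 * result is returned unchanged.
 */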

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value)
{
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value,
						   u64 flags)
{
	return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */