/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H

#include <linux/bpf.h>
#include <linux/bpf-cgroup-defs.h>
#include <linux/errno.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/rbtree.h>
#include <net/sock.h>
#include <uapi/linux/bpf.h>

struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
struct bpf_map;
struct bpf_prog;
struct bpf_sock_ops_kern;
struct bpf_cgroup_storage;
struct ctl_table;
struct ctl_table_header;
struct task_struct;

unsigned int __cgroup_bpf_run_lsm_sock(const void *ctx,
                                const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
                                const struct bpf_insn *insn);
unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
                                const struct bpf_insn *insn);

#ifdef CONFIG_CGROUP_BPF

#define CGROUP_ATYPE(type) \
        case BPF_##type: return type

static inline enum cgroup_bpf_attach_type
to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
{
        switch (attach_type) {
        CGROUP_ATYPE(CGROUP_INET_INGRESS);
        CGROUP_ATYPE(CGROUP_INET_EGRESS);
        CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE);
        CGROUP_ATYPE(CGROUP_SOCK_OPS);
        CGROUP_ATYPE(CGROUP_DEVICE);
        CGROUP_ATYPE(CGROUP_INET4_BIND);
        CGROUP_ATYPE(CGROUP_INET6_BIND);
        CGROUP_ATYPE(CGROUP_INET4_CONNECT);
        CGROUP_ATYPE(CGROUP_INET6_CONNECT);
        CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
        CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
        CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
        CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
        CGROUP_ATYPE(CGROUP_SYSCTL);
        CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
        CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
        CGROUP_ATYPE(CGROUP_GETSOCKOPT);
        CGROUP_ATYPE(CGROUP_SETSOCKOPT);
        CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
        CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
        CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
        default:
                return CGROUP_BPF_ATTACH_TYPE_INVALID;
        }
}

#undef CGROUP_ATYPE

extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE];
#define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype])

#define for_each_cgroup_storage_type(stype) \
        for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)

struct bpf_cgroup_storage_map;

struct bpf_storage_buffer {
        struct rcu_head rcu;
        char data[];
};

struct bpf_cgroup_storage {
        union {
                struct bpf_storage_buffer *buf;
                void __percpu *percpu_buf;
        };
        struct bpf_cgroup_storage_map *map;
        struct bpf_cgroup_storage_key key;
        struct list_head list_map;
        struct list_head list_cg;
        struct rb_node node;
        struct rcu_head rcu;
};

struct bpf_cgroup_link {
        struct bpf_link link;
        struct cgroup *cgroup;
        enum bpf_attach_type type;
};

struct bpf_prog_list {
        struct hlist_node node;
        struct bpf_prog *prog;
        struct bpf_cgroup_link *link;
        struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
};

int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);

int __cgroup_bpf_run_filter_skb(struct sock *sk,
                                struct sk_buff *skb,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sk(struct sock *sk,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
                                struct sockaddr *uaddr,
                                int *uaddrlen,
                                enum cgroup_bpf_attach_type atype,
                                void *t_ctx,
                                u32 *flags);

int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
                                struct bpf_sock_ops_kern *sock_ops,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
                                short access, enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                struct ctl_table *table, int write,
                                char **buf, size_t *pcount, loff_t *ppos,
                                enum cgroup_bpf_attach_type atype);

int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
                                int *optname, sockptr_t optval,
                                int *optlen, char **kernel_optval);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
                                int optname, sockptr_t optval,
                                sockptr_t optlen, int max_optlen,
                                int retval);

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
                                int optname, void *optval,
                                int *optlen, int retval);

static inline enum bpf_cgroup_storage_type cgroup_storage_type(
        struct bpf_map *map)
{
        if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE)
                return BPF_CGROUP_STORAGE_PERCPU;

        return BPF_CGROUP_STORAGE_SHARED;
}

struct bpf_cgroup_storage *
cgroup_storage_lookup(struct bpf_cgroup_storage_map *map,
                      void *key, bool locked);
struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
                                enum bpf_cgroup_storage_type stype);
void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
                                struct cgroup *cgroup,
                                enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);

int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
                                void *value, u64 flags);

/* Opportunistic check to see whether we have any BPF program attached */
static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
                                enum cgroup_bpf_attach_type type)
{
        struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
        struct bpf_prog_array *array;

        array = rcu_access_pointer(cgrp->bpf.effective[type]);
        return array != &bpf_empty_prog_array.hdr;
}
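
/*
 * cgroup_bpf_sock_enabled() only compares the effective array pointer
 * against the shared &bpf_empty_prog_array.hdr sentinel and never
 * dereferences it, which is why rcu_access_pointer() is sufficient for
 * this opportunistic test.
 */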

/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \
            cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \
                __ret = __cgroup_bpf_run_filter_skb(sk, skb, \
                                                CGROUP_INET_INGRESS); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk) { \
                typeof(sk) __sk = sk_to_full_sk(sk); \
                if (sk_fullsock(__sk) && __sk == skb_to_full_sk(skb) && \
                    cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \
                        __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \
                                                CGROUP_INET_EGRESS); \
        } \
        __ret; \
})
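
/*
 * Illustrative sketch (names and the surrounding error handling belong to
 * the caller, not to this header): on the receive side the ingress wrapper
 * is meant to run before the skb is queued to the socket, with a non-zero
 * return treated as a drop:
 *
 *	int err = BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb);
 *
 *	if (err)
 *		return err;
 *
 * The egress wrapper is used analogously on the transmit path; it resolves
 * the full socket itself via sk_to_full_sk() before running the programs.
 */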

#define BPF_CGROUP_RUN_SK_PROG(sk, atype) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                __ret = __cgroup_bpf_run_filter_sk(sk, atype); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE)

#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE)

#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND)

#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
        BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)

#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                atype, NULL, NULL); \
        __ret; \
})

#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                atype, t_ctx, NULL); \
                release_sock(sk); \
        } \
        __ret; \
})

/* BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags
 * via upper bits of return code. The only flag that is supported
 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
 */
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({ \
        u32 __flags = 0; \
        int __ret = 0; \
        if (cgroup_bpf_enabled(atype)) { \
                lock_sock(sk); \
                __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
                                                atype, NULL, &__flags); \
                release_sock(sk); \
                if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
                        *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
        } \
        __ret; \
})
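
/*
 * Illustrative sketch, not copied from a specific caller: a bind()
 * implementation runs the hook under the socket lock through this wrapper
 * and honours the returned capability-bypass flag. uaddr, addr_len and the
 * snum_is_privileged() test are placeholders for whatever the caller has
 * in scope:
 *
 *	u32 flags = 0;
 *	int err;
 *
 *	err = BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, &addr_len,
 *						 CGROUP_INET4_BIND, &flags);
 *	if (err)
 *		return err;
 *	if (snum_is_privileged(snum) &&
 *	    !(flags & BIND_NO_CAP_NET_BIND_SERVICE) &&
 *	    !ns_capable(sock_net(sk)->user_ns, CAP_NET_BIND_SERVICE))
 *		return -EACCES;
 */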

#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \
        ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \
          cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
         (sk)->sk_prot->pre_connect)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)

#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)

#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)

#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)

#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
        BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)

/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
 * fullsock and its parent fullsock cannot be traced by
 * sk_to_full_sk().
 *
 * e.g. sock_ops->sk is a request_sock under syncookie mode, so its
 * listener-sk is not attached via rsk_listener.
 * In this case, the caller holds the listener-sk (unlocked),
 * sets sock_ops->sk to the req_sk, and calls this SOCK_OPS"_SK" macro with
 * the listener-sk so that the cgroup-bpf-progs of the listener-sk
 * will be run.
 *
 * Regardless of syncookie mode,
 * calling bpf_setsockopt on the listener-sk would not make sense anyway,
 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here.
 */
#define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \
                __ret = __cgroup_bpf_run_filter_sock_ops(sk, \
                                                sock_ops, \
                                                CGROUP_SOCK_OPS); \
        __ret; \
})
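
/*
 * Illustrative sketch of the syncookie case described above: req_sk (the
 * request socket the program will see), listener_sk and the chosen op are
 * placeholders, not taken from a specific caller:
 *
 *	struct bpf_sock_ops_kern sock_ops = {};
 *
 *	sock_ops.op = BPF_SOCK_OPS_PARSE_HDR_OPT_CB;
 *	sock_ops.sk = req_sk;
 *	(void)BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(&sock_ops, listener_sk);
 */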

#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \
                typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \
                if (__sk && sk_fullsock(__sk)) \
                        __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \
                                                sock_ops, \
                                                CGROUP_SOCK_OPS); \
        } \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_DEVICE)) \
                __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \
                                                access, \
                                                CGROUP_DEVICE); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \
                __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \
                                                buf, count, pos, \
                                                CGROUP_SYSCTL); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) \
({ \
        int __ret = 0; \
        if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \
            cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \
                __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \
                                                optname, optval, \
                                                optlen, \
                                                kernel_optval); \
        __ret; \
})

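/*
 * A protocol may exempt specific getsockopt() optnames from the BPF hook
 * via sk_prot->bpf_bypass_getsockopt (TCP wires this up through
 * tcp_bpf_bypass_getsockopt); the wrapper below only runs the attached
 * programs when no such bypass applies.
 */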
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \
                                       max_optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \
            cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \
                if (!(sock)->sk_prot->bpf_bypass_getsockopt || \
                    !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \
                                        tcp_bpf_bypass_getsockopt, \
                                        level, optname)) \
                        __ret = __cgroup_bpf_run_filter_getsockopt( \
                                sock, level, optname, optval, optlen, \
                                max_optlen, retval); \
        __ret; \
})

#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) \
({ \
        int __ret = retval; \
        if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
                __ret = __cgroup_bpf_run_filter_getsockopt_kern( \
                        sock, level, optname, optval, optlen, retval); \
        __ret; \
})

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype, struct bpf_prog *prog);
int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                           enum bpf_prog_type ptype);
int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int cgroup_bpf_prog_query(const union bpf_attr *attr,
                          union bpf_attr __user *uattr);

const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog);
#else

static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
                                         enum bpf_prog_type ptype)
{
        return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
                                         struct bpf_prog *prog)
{
        return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                        union bpf_attr __user *uattr)
{
        return -EINVAL;
}

static inline const struct bpf_func_proto *
cgroup_common_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline const struct bpf_func_proto *
cgroup_current_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
        return NULL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
                                            struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
        struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
        struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
                                                 void *value)
{
        return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
                                                   void *key, void *value, u64 flags)
{
        return 0;
}

#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) ({ 0; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
                                            optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
                                       kernel_optval) ({ 0; })

#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */