1 /* SPDX-License-Identifier: GPL-2.0 */ 2 #ifndef _BPF_CGROUP_H 3 #define _BPF_CGROUP_H 4 5 #include <linux/bpf.h> 6 #include <linux/bpf-cgroup-defs.h> 7 #include <linux/errno.h> 8 #include <linux/jump_label.h> 9 #include <linux/percpu.h> 10 #include <linux/rbtree.h> 11 #include <net/sock.h> 12 #include <uapi/linux/bpf.h> 13 14 struct sock; 15 struct sockaddr; 16 struct cgroup; 17 struct sk_buff; 18 struct bpf_map; 19 struct bpf_prog; 20 struct bpf_sock_ops_kern; 21 struct bpf_cgroup_storage; 22 struct ctl_table; 23 struct ctl_table_header; 24 struct task_struct; 25 26 #ifdef CONFIG_CGROUP_BPF 27 28 #define CGROUP_ATYPE(type) \ 29 case BPF_##type: return type 30 31 static inline enum cgroup_bpf_attach_type 32 to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) 33 { 34 switch (attach_type) { 35 CGROUP_ATYPE(CGROUP_INET_INGRESS); 36 CGROUP_ATYPE(CGROUP_INET_EGRESS); 37 CGROUP_ATYPE(CGROUP_INET_SOCK_CREATE); 38 CGROUP_ATYPE(CGROUP_SOCK_OPS); 39 CGROUP_ATYPE(CGROUP_DEVICE); 40 CGROUP_ATYPE(CGROUP_INET4_BIND); 41 CGROUP_ATYPE(CGROUP_INET6_BIND); 42 CGROUP_ATYPE(CGROUP_INET4_CONNECT); 43 CGROUP_ATYPE(CGROUP_INET6_CONNECT); 44 CGROUP_ATYPE(CGROUP_INET4_POST_BIND); 45 CGROUP_ATYPE(CGROUP_INET6_POST_BIND); 46 CGROUP_ATYPE(CGROUP_UDP4_SENDMSG); 47 CGROUP_ATYPE(CGROUP_UDP6_SENDMSG); 48 CGROUP_ATYPE(CGROUP_SYSCTL); 49 CGROUP_ATYPE(CGROUP_UDP4_RECVMSG); 50 CGROUP_ATYPE(CGROUP_UDP6_RECVMSG); 51 CGROUP_ATYPE(CGROUP_GETSOCKOPT); 52 CGROUP_ATYPE(CGROUP_SETSOCKOPT); 53 CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME); 54 CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME); 55 CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME); 56 CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME); 57 CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE); 58 default: 59 return CGROUP_BPF_ATTACH_TYPE_INVALID; 60 } 61 } 62 63 #undef CGROUP_ATYPE 64 65 extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE]; 66 #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype]) 67 68 #define 
for_each_cgroup_storage_type(stype) \ 69 for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) 70 71 struct bpf_cgroup_storage_map; 72 73 struct bpf_storage_buffer { 74 struct rcu_head rcu; 75 char data[]; 76 }; 77 78 struct bpf_cgroup_storage { 79 union { 80 struct bpf_storage_buffer *buf; 81 void __percpu *percpu_buf; 82 }; 83 struct bpf_cgroup_storage_map *map; 84 struct bpf_cgroup_storage_key key; 85 struct list_head list_map; 86 struct list_head list_cg; 87 struct rb_node node; 88 struct rcu_head rcu; 89 }; 90 91 struct bpf_cgroup_link { 92 struct bpf_link link; 93 struct cgroup *cgroup; 94 enum bpf_attach_type type; 95 }; 96 97 struct bpf_prog_list { 98 struct list_head node; 99 struct bpf_prog *prog; 100 struct bpf_cgroup_link *link; 101 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]; 102 }; 103 104 int cgroup_bpf_inherit(struct cgroup *cgrp); 105 void cgroup_bpf_offline(struct cgroup *cgrp); 106 107 int __cgroup_bpf_run_filter_skb(struct sock *sk, 108 struct sk_buff *skb, 109 enum cgroup_bpf_attach_type atype); 110 111 int __cgroup_bpf_run_filter_sk(struct sock *sk, 112 enum cgroup_bpf_attach_type atype); 113 114 int __cgroup_bpf_run_filter_sock_addr(struct sock *sk, 115 struct sockaddr *uaddr, 116 enum cgroup_bpf_attach_type atype, 117 void *t_ctx, 118 u32 *flags); 119 120 int __cgroup_bpf_run_filter_sock_ops(struct sock *sk, 121 struct bpf_sock_ops_kern *sock_ops, 122 enum cgroup_bpf_attach_type atype); 123 124 int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, 125 short access, enum cgroup_bpf_attach_type atype); 126 127 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head, 128 struct ctl_table *table, int write, 129 char **buf, size_t *pcount, loff_t *ppos, 130 enum cgroup_bpf_attach_type atype); 131 132 int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level, 133 int *optname, char __user *optval, 134 int *optlen, char **kernel_optval); 135 int 
__cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, 136 int optname, char __user *optval, 137 int __user *optlen, int max_optlen, 138 int retval); 139 140 int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level, 141 int optname, void *optval, 142 int *optlen, int retval); 143 144 static inline enum bpf_cgroup_storage_type cgroup_storage_type( 145 struct bpf_map *map) 146 { 147 if (map->map_type == BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE) 148 return BPF_CGROUP_STORAGE_PERCPU; 149 150 return BPF_CGROUP_STORAGE_SHARED; 151 } 152 153 struct bpf_cgroup_storage * 154 cgroup_storage_lookup(struct bpf_cgroup_storage_map *map, 155 void *key, bool locked); 156 struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog, 157 enum bpf_cgroup_storage_type stype); 158 void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); 159 void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, 160 struct cgroup *cgroup, 161 enum bpf_attach_type type); 162 void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage); 163 int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map); 164 165 int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value); 166 int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key, 167 void *value, u64 flags); 168 169 /* Opportunistic check to see whether we have any BPF program attached*/ 170 static inline bool cgroup_bpf_sock_enabled(struct sock *sk, 171 enum cgroup_bpf_attach_type type) 172 { 173 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 174 struct bpf_prog_array *array; 175 176 array = rcu_access_pointer(cgrp->bpf.effective[type]); 177 return array != &bpf_empty_prog_array.hdr; 178 } 179 180 /* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. 
*/ 181 #define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \ 182 ({ \ 183 int __ret = 0; \ 184 if (cgroup_bpf_enabled(CGROUP_INET_INGRESS) && \ 185 cgroup_bpf_sock_enabled(sk, CGROUP_INET_INGRESS)) \ 186 __ret = __cgroup_bpf_run_filter_skb(sk, skb, \ 187 CGROUP_INET_INGRESS); \ 188 \ 189 __ret; \ 190 }) 191 192 #define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb) \ 193 ({ \ 194 int __ret = 0; \ 195 if (cgroup_bpf_enabled(CGROUP_INET_EGRESS) && sk && sk == skb->sk) { \ 196 typeof(sk) __sk = sk_to_full_sk(sk); \ 197 if (sk_fullsock(__sk) && \ 198 cgroup_bpf_sock_enabled(__sk, CGROUP_INET_EGRESS)) \ 199 __ret = __cgroup_bpf_run_filter_skb(__sk, skb, \ 200 CGROUP_INET_EGRESS); \ 201 } \ 202 __ret; \ 203 }) 204 205 #define BPF_CGROUP_RUN_SK_PROG(sk, atype) \ 206 ({ \ 207 int __ret = 0; \ 208 if (cgroup_bpf_enabled(atype)) { \ 209 __ret = __cgroup_bpf_run_filter_sk(sk, atype); \ 210 } \ 211 __ret; \ 212 }) 213 214 #define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) \ 215 BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_CREATE) 216 217 #define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) \ 218 BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET_SOCK_RELEASE) 219 220 #define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) \ 221 BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET4_POST_BIND) 222 223 #define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \ 224 BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND) 225 226 #define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \ 227 ({ \ 228 u32 __unused_flags; \ 229 int __ret = 0; \ 230 if (cgroup_bpf_enabled(atype)) \ 231 __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ 232 NULL, \ 233 &__unused_flags); \ 234 __ret; \ 235 }) 236 237 #define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \ 238 ({ \ 239 u32 __unused_flags; \ 240 int __ret = 0; \ 241 if (cgroup_bpf_enabled(atype)) { \ 242 lock_sock(sk); \ 243 __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ 244 t_ctx, \ 245 &__unused_flags); \ 246 release_sock(sk); \ 247 } \ 248 __ret; \ 249 }) 250 251 /* 
BPF_CGROUP_INET4_BIND and BPF_CGROUP_INET6_BIND can return extra flags 252 * via upper bits of return code. The only flag that is supported 253 * (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check 254 * should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE). 255 */ 256 #define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \ 257 ({ \ 258 u32 __flags = 0; \ 259 int __ret = 0; \ 260 if (cgroup_bpf_enabled(atype)) { \ 261 lock_sock(sk); \ 262 __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \ 263 NULL, &__flags); \ 264 release_sock(sk); \ 265 if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \ 266 *bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \ 267 } \ 268 __ret; \ 269 }) 270 271 #define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) \ 272 ((cgroup_bpf_enabled(CGROUP_INET4_CONNECT) || \ 273 cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \ 274 (sk)->sk_prot->pre_connect) 275 276 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \ 277 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT) 278 279 #define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \ 280 BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT) 281 282 #define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \ 283 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL) 284 285 #define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \ 286 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL) 287 288 #define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \ 289 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx) 290 291 #define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \ 292 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx) 293 294 #define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \ 295 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL) 296 297 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \ 298 BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, 
CGROUP_UDP6_RECVMSG, NULL) 299 300 /* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a 301 * fullsock and its parent fullsock cannot be traced by 302 * sk_to_full_sk(). 303 * 304 * e.g. sock_ops->sk is a request_sock and it is under syncookie mode. 305 * Its listener-sk is not attached to the rsk_listener. 306 * In this case, the caller holds the listener-sk (unlocked), 307 * set its sock_ops->sk to req_sk, and call this SOCK_OPS"_SK" with 308 * the listener-sk such that the cgroup-bpf-progs of the 309 * listener-sk will be run. 310 * 311 * Regardless of syncookie mode or not, 312 * calling bpf_setsockopt on listener-sk will not make sense anyway, 313 * so passing 'sock_ops->sk == req_sk' to the bpf prog is appropriate here. 314 */ 315 #define BPF_CGROUP_RUN_PROG_SOCK_OPS_SK(sock_ops, sk) \ 316 ({ \ 317 int __ret = 0; \ 318 if (cgroup_bpf_enabled(CGROUP_SOCK_OPS)) \ 319 __ret = __cgroup_bpf_run_filter_sock_ops(sk, \ 320 sock_ops, \ 321 CGROUP_SOCK_OPS); \ 322 __ret; \ 323 }) 324 325 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) \ 326 ({ \ 327 int __ret = 0; \ 328 if (cgroup_bpf_enabled(CGROUP_SOCK_OPS) && (sock_ops)->sk) { \ 329 typeof(sk) __sk = sk_to_full_sk((sock_ops)->sk); \ 330 if (__sk && sk_fullsock(__sk)) \ 331 __ret = __cgroup_bpf_run_filter_sock_ops(__sk, \ 332 sock_ops, \ 333 CGROUP_SOCK_OPS); \ 334 } \ 335 __ret; \ 336 }) 337 338 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) \ 339 ({ \ 340 int __ret = 0; \ 341 if (cgroup_bpf_enabled(CGROUP_DEVICE)) \ 342 __ret = __cgroup_bpf_check_dev_permission(atype, major, minor, \ 343 access, \ 344 CGROUP_DEVICE); \ 345 \ 346 __ret; \ 347 }) 348 349 350 #define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos) \ 351 ({ \ 352 int __ret = 0; \ 353 if (cgroup_bpf_enabled(CGROUP_SYSCTL)) \ 354 __ret = __cgroup_bpf_run_filter_sysctl(head, table, write, \ 355 buf, count, pos, \ 356 CGROUP_SYSCTL); \ 357 __ret; \ 358 }) 359 360 #define 
BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ 361 kernel_optval) \ 362 ({ \ 363 int __ret = 0; \ 364 if (cgroup_bpf_enabled(CGROUP_SETSOCKOPT) && \ 365 cgroup_bpf_sock_enabled(sock, CGROUP_SETSOCKOPT)) \ 366 __ret = __cgroup_bpf_run_filter_setsockopt(sock, level, \ 367 optname, optval, \ 368 optlen, \ 369 kernel_optval); \ 370 __ret; \ 371 }) 372 373 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) \ 374 ({ \ 375 int __ret = 0; \ 376 if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ 377 get_user(__ret, optlen); \ 378 __ret; \ 379 }) 380 381 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, optlen, \ 382 max_optlen, retval) \ 383 ({ \ 384 int __ret = retval; \ 385 if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT) && \ 386 cgroup_bpf_sock_enabled(sock, CGROUP_GETSOCKOPT)) \ 387 if (!(sock)->sk_prot->bpf_bypass_getsockopt || \ 388 !INDIRECT_CALL_INET_1((sock)->sk_prot->bpf_bypass_getsockopt, \ 389 tcp_bpf_bypass_getsockopt, \ 390 level, optname)) \ 391 __ret = __cgroup_bpf_run_filter_getsockopt( \ 392 sock, level, optname, optval, optlen, \ 393 max_optlen, retval); \ 394 __ret; \ 395 }) 396 397 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \ 398 optlen, retval) \ 399 ({ \ 400 int __ret = retval; \ 401 if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \ 402 __ret = __cgroup_bpf_run_filter_getsockopt_kern( \ 403 sock, level, optname, optval, optlen, retval); \ 404 __ret; \ 405 }) 406 407 int cgroup_bpf_prog_attach(const union bpf_attr *attr, 408 enum bpf_prog_type ptype, struct bpf_prog *prog); 409 int cgroup_bpf_prog_detach(const union bpf_attr *attr, 410 enum bpf_prog_type ptype); 411 int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog); 412 int cgroup_bpf_prog_query(const union bpf_attr *attr, 413 union bpf_attr __user *uattr); 414 #else 415 416 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; } 417 static inline void cgroup_bpf_offline(struct cgroup *cgrp) {} 418 419 
/*
 * CONFIG_CGROUP_BPF=n stubs: the syscall entry points fail with -EINVAL,
 * the storage helpers are no-ops, and the run-program hook macros expand
 * to 0 (the getsockopt hooks pass 'retval' through unchanged).
 */
static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
					 enum bpf_prog_type ptype)
{
	return -EINVAL;
}

static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
					 struct bpf_prog *prog)
{
	return -EINVAL;
}

static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
					union bpf_attr __user *uattr)
{
	return -EINVAL;
}

static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
					    struct bpf_map *map) { return 0; }
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
	struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
static inline void bpf_cgroup_storage_free(
	struct bpf_cgroup_storage *storage) {}
static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key,
						 void *value) {
	return 0;
}
static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
						   void *key, void *value, u64 flags) {
	return 0;
}

/* The static-key test itself is compiled out. */
#define cgroup_bpf_enabled(atype) (0)
#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
#define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
/* getsockopt hooks must preserve the protocol handler's return value. */
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
				       optlen, max_optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_GETSOCKOPT_KERN(sock, level, optname, optval, \
					    optlen, retval) ({ retval; })
#define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \
				       kernel_optval) ({ 0; })

/* Zero iterations: keeps callers compiling without a storage-type enum. */
#define for_each_cgroup_storage_type(stype) for (; false; )

#endif /* CONFIG_CGROUP_BPF */

#endif /* _BPF_CGROUP_H */