// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

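/* Worked example for the rules above (hypothetical hierarchy and programs,
 * for illustration only):
 *
 *	attach(A, "/",            BPF_F_ALLOW_MULTI)    -> ok
 *	attach(B, "/foo",         BPF_F_ALLOW_OVERRIDE) -> ok ("/" is multi)
 *	attach(C, "/foo/bar",     0)                    -> ok, C overrides B
 *	                                                   for "/foo/bar" and
 *	                                                   below
 *	attach(D, "/foo/bar/baz", 0)                    -> -EPERM, C is neither
 *	                                                   overridable nor multi
 */
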
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

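/* Worked example of the computation above (hypothetical hierarchy, for
 * illustration only): with BPF_F_ALLOW_MULTI everywhere and
 *
 *	"/"    : progs [A]
 *	"/foo" : progs [B, C]
 *
 * effective("/foo") is the array [B, C, A]: the cgroup's own programs are
 * placed first, then those of its ancestors, and BPF_PROG_RUN_ARRAY()
 * executes them in that array order.
 */
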
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

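/* Userspace sketch of the replacement path resolved by find_attach_entry()
 * above (hypothetical fds, plain bpf(2) syscall; not part of this file):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd      = cgroup_fd;
 *	attr.attach_bpf_fd  = new_prog_fd;
 *	attr.replace_bpf_fd = old_prog_fd;
 *	attr.attach_type    = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags   = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * If old_prog_fd is not attached to cgroup_fd, the attach fails with -ENOENT.
 */
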
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
		*old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

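/* Userspace sketch reaching __cgroup_bpf_attach() above (hypothetical fds,
 * for illustration only):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;		// fd of the cgroup directory
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */
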
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: The program to use for the link
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
			 struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

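/* Userspace sketch of detaching (hypothetical fds). In multi mode a valid
 * program (or link) fd is required; in legacy non-multi mode attach_bpf_fd
 * may be invalid and the single attached program is removed:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;	// optional in legacy mode
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */
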
/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

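/* Userspace sketch of the two-step query served by __cgroup_bpf_query()
 * above (hypothetical fds): a first call with prog_cnt == 0 returns only
 * the count and attach flags, a second call fetches the program ids:
 *
 *	union bpf_attr attr = {};
 *	__u32 ids[64];
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *
 *	attr.query.prog_ids = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt = 64;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */
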
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_cgroup_link *link;
	struct file *link_file;
	struct cgroup *cgrp;
	int err, link_fd;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	link_file = bpf_link_new_file(&link->link, &link_fd);
	if (IS_ERR(link_file)) {
		kfree(link);
		err = PTR_ERR(link_file);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link->link, link_file, link_fd);
		goto out_put_cgroup;
	}

	fd_install(link_fd, link_file);
	return link_fd;

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

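/* Userspace sketch of creating a cgroup bpf_link (hypothetical fds). Links
 * always attach with BPF_F_ALLOW_MULTI semantics (see above) and stay in
 * place until the last link fd is closed or the cgroup dies:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_EGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */
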
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

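/* Minimal cgroup/skb program fed into the run-array calls above (hedged
 * sketch using libbpf section conventions; not part of this file):
 *
 *	SEC("cgroup_skb/egress")
 *	int cg_skb_allow(struct __sk_buff *skb)
 *	{
 *		return 1;	// allow; other values deny per the mapping
 *				// documented above
 *	}
 */
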
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_map_lookup_elem:
		return &bpf_map_lookup_elem_proto;
	case BPF_FUNC_map_update_elem:
		return &bpf_map_update_elem_proto;
	case BPF_FUNC_map_delete_elem:
		return &bpf_map_delete_elem_proto;
	case BPF_FUNC_map_push_elem:
		return &bpf_map_push_elem_proto;
	case BPF_FUNC_map_pop_elem:
		return &bpf_map_pop_elem_proto;
	case BPF_FUNC_map_peek_elem:
		return &bpf_map_peek_elem_proto;
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_trace_printk:
		if (capable(CAP_SYS_ADMIN))
			return bpf_get_trace_printk_proto();
		/* fall through */
	default:
		return NULL;
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

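/* Sketch of a device program consuming the context built by
 * __cgroup_bpf_check_dev_permission() above (hedged example, libbpf-style
 * section name): access_type packs the access mask in the upper 16 bits
 * and the device type in the lower 16 bits:
 *
 *	SEC("cgroup/dev")
 *	int cg_dev_guard(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type   = ctx->access_type & 0xFFFF;	// BPF_DEVCG_DEV_*
 *		short access = ctx->access_type >> 16;		// BPF_DEVCG_ACC_*
 *
 *		if (type == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 3)
 *			return 1;	// allow /dev/null
 *		return 0;		// deny everything else
 *	}
 */
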
static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

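/* Minimal cgroup/sysctl program as run by the function above (hedged
 * sketch, libbpf-style section name): returning 0 makes the access fail
 * with -EPERM, returning 1 lets it proceed:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_guard(struct bpf_sysctl *ctx)
 *	{
 *		if (ctx->write)
 *			return 0;	// deny all sysctl writes
 *		return 1;		// allow reads
 *	}
 */
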
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);

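/* Sketch of a cgroup/setsockopt program driving the logic above (hedged
 * example; SOL_CUSTOM is hypothetical). Returning 0 rejects the call with
 * EPERM; returning 1 with ctx->optlen == -1 bypasses the kernel handler;
 * returning 1 otherwise runs the kernel handler on the (possibly modified)
 * buffer:
 *
 *	SEC("cgroup/setsockopt")
 *	int setsockopt_hook(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_CUSTOM) {
 *			ctx->optlen = -1;	// consumed here, bypass kernel
 *			return 1;
 *		}
 *		return 1;			// let the kernel handle it
 *	}
 */
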
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs are only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

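/* Hypothetical use of the helpers above from a cgroup/sysctl program:
 * fetch the full "dir/.../name" path of the sysctl being accessed and the
 * value that is about to be written (hedged sketch, not part of this file):
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_watch(struct bpf_sysctl *ctx)
 *	{
 *		char name[64], new_val[32];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 1;
 *		if (ctx->write &&
 *		    bpf_sysctl_get_new_value(ctx, new_val, sizeof(new_val)) < 0)
 *			return 1;
 *		// e.g. compare name against "net/ipv4/tcp_syncookies"
 *		return 1;
 *	}
 */
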
BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

1634 */ 1635 if (type == BPF_WRITE) { 1636 int treg = BPF_REG_9; 1637 1638 if (si->src_reg == treg || si->dst_reg == treg) 1639 --treg; 1640 if (si->src_reg == treg || si->dst_reg == treg) 1641 --treg; 1642 *insn++ = BPF_STX_MEM( 1643 BPF_DW, si->dst_reg, treg, 1644 offsetof(struct bpf_sysctl_kern, tmp_reg)); 1645 *insn++ = BPF_LDX_MEM( 1646 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), 1647 treg, si->dst_reg, 1648 offsetof(struct bpf_sysctl_kern, ppos)); 1649 *insn++ = BPF_STX_MEM( 1650 BPF_SIZEOF(u32), treg, si->src_reg, 1651 bpf_ctx_narrow_access_offset( 1652 0, sizeof(u32), sizeof(loff_t))); 1653 *insn++ = BPF_LDX_MEM( 1654 BPF_DW, treg, si->dst_reg, 1655 offsetof(struct bpf_sysctl_kern, tmp_reg)); 1656 } else { 1657 *insn++ = BPF_LDX_MEM( 1658 BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos), 1659 si->dst_reg, si->src_reg, 1660 offsetof(struct bpf_sysctl_kern, ppos)); 1661 read_size = bpf_size_to_bytes(BPF_SIZE(si->code)); 1662 *insn++ = BPF_LDX_MEM( 1663 BPF_SIZE(si->code), si->dst_reg, si->dst_reg, 1664 bpf_ctx_narrow_access_offset( 1665 0, read_size, sizeof(loff_t))); 1666 } 1667 *target_size = sizeof(u32); 1668 break; 1669 } 1670 1671 return insn - insn_buf; 1672 } 1673 1674 const struct bpf_verifier_ops cg_sysctl_verifier_ops = { 1675 .get_func_proto = sysctl_func_proto, 1676 .is_valid_access = sysctl_is_valid_access, 1677 .convert_ctx_access = sysctl_convert_ctx_access, 1678 }; 1679 1680 const struct bpf_prog_ops cg_sysctl_prog_ops = { 1681 }; 1682 1683 static const struct bpf_func_proto * 1684 cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) 1685 { 1686 switch (func_id) { 1687 #ifdef CONFIG_NET 1688 case BPF_FUNC_sk_storage_get: 1689 return &bpf_sk_storage_get_proto; 1690 case BPF_FUNC_sk_storage_delete: 1691 return &bpf_sk_storage_delete_proto; 1692 #endif 1693 #ifdef CONFIG_INET 1694 case BPF_FUNC_tcp_sock: 1695 return &bpf_tcp_sock_proto; 1696 #endif 1697 default: 1698 return cgroup_base_func_proto(func_id, prog); 1699 } 1700 } 1701 1702 static bool cg_sockopt_is_valid_access(int off, int size, 1703 enum bpf_access_type type, 1704 const struct bpf_prog *prog, 1705 struct bpf_insn_access_aux *info) 1706 { 1707 const int size_default = sizeof(__u32); 1708 1709 if (off < 0 || off >= sizeof(struct bpf_sockopt)) 1710 return false; 1711 1712 if (off % size != 0) 1713 return false; 1714 1715 if (type == BPF_WRITE) { 1716 switch (off) { 1717 case offsetof(struct bpf_sockopt, retval): 1718 if (size != size_default) 1719 return false; 1720 return prog->expected_attach_type == 1721 BPF_CGROUP_GETSOCKOPT; 1722 case offsetof(struct bpf_sockopt, optname): 1723 /* fallthrough */ 1724 case offsetof(struct bpf_sockopt, level): 1725 if (size != size_default) 1726 return false; 1727 return prog->expected_attach_type == 1728 BPF_CGROUP_SETSOCKOPT; 1729 case offsetof(struct bpf_sockopt, optlen): 1730 return size == size_default; 1731 default: 1732 return false; 1733 } 1734 } 1735 1736 switch (off) { 1737 case offsetof(struct bpf_sockopt, sk): 1738 if (size != sizeof(__u64)) 1739 return false; 1740 info->reg_type = PTR_TO_SOCKET; 1741 break; 1742 case offsetof(struct bpf_sockopt, optval): 1743 if (size != sizeof(__u64)) 1744 return false; 1745 info->reg_type = PTR_TO_PACKET; 1746 break; 1747 case offsetof(struct bpf_sockopt, optval_end): 1748 if (size != sizeof(__u64)) 1749 return false; 1750 info->reg_type = PTR_TO_PACKET_END; 1751 break; 1752 case offsetof(struct bpf_sockopt, retval): 1753 if (size != size_default) 1754 return false; 1755 return 
static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};