// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}
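/* Illustrative sketch, not part of this file: the storages managed by the
 * helpers above back BPF_MAP_TYPE_CGROUP_STORAGE maps. A cgroup BPF program
 * reaches its per-attachment slot roughly like this (the map name and legacy
 * map definition style are assumptions of the example):
 *
 *	struct bpf_map_def SEC("maps") cg_storage = {
 *		.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
 *		.key_size	= sizeof(struct bpf_cgroup_storage_key),
 *		.value_size	= sizeof(__u64),
 *	};
 *
 *	SEC("cgroup_skb/egress")
 *	int count_egress(struct __sk_buff *skb)
 *	{
 *		__u64 *bytes = bpf_get_local_storage(&cg_storage, 0);
 *
 *		__sync_fetch_and_add(bytes, skb->len);
 *		return 1;	// allow the packet
 *	}
 */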
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops the cgroup refcount and marks the bpf_link as defunct by clearing
 * its cgroup pointer. It doesn't free link memory, which will eventually be
 * done by bpf_link's release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
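/* Illustrative examples of the rules implemented above (not kernel code):
 *
 *	root (prog, BPF_F_ALLOW_OVERRIDE)
 *	`- child	<- attach allowed; the new prog overrides root's
 *
 *	root (prog, no flags)
 *	`- child	<- attach denied with -EPERM
 *
 *	root (progs, BPF_F_ALLOW_MULTI)
 *	`- child	<- attach always allowed
 */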
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's BPF_F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
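/* Worked example (illustrative): given the hierarchy
 *
 *	A (progs: a1; flags: BPF_F_ALLOW_MULTI)
 *	`- B (progs: b1, b2; flags: BPF_F_ALLOW_MULTI)
 *	   `- C (progs: c1; flags: BPF_F_ALLOW_MULTI)
 *
 * compute_effective_progs(C, type, ...) walks from C toward the root and
 * yields [c1, b1, b2, a1]: the cgroup's own programs occupy the lowest
 * indices, followed by its ancestors' programs in attach order.
 */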
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}
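/* A minimal user-space sketch (not kernel code) of how an attach request
 * reaches __cgroup_bpf_attach() below; cgroup_fd and prog_fd are assumed to
 * come from open(2) on a cgroup directory and BPF_PROG_LOAD respectively:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd		= cgroup_fd;
 *	attr.attach_bpf_fd	= prog_fd;
 *	attr.attach_type	= BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags	= BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */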
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @link: A link to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: The program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
			 struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
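/* A user-space sketch (not kernel code) of the query interface implemented
 * above; cgroup_fd and the array size are assumptions of the example:
 *
 *	__u32 prog_ids[64] = {};
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd	= cgroup_fd;
 *	attr.query.attach_type	= BPF_CGROUP_INET_EGRESS;
 *	attr.query.prog_cnt	= 64;
 *	attr.query.prog_ids	= (__u64)(unsigned long)prog_ids;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	// on success attr.query.prog_cnt holds the number of ids written
 */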
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_cgroup_link *link;
	struct file *link_file;
	struct cgroup *cgrp;
	int err, link_fd;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	link_file = bpf_link_new_file(&link->link, &link_fd);
	if (IS_ERR(link_file)) {
		kfree(link);
		err = PTR_ERR(link_file);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link->link, link_file, link_fd);
		goto out_put_cgroup;
	}

	fd_install(link_fd, link_file);
	return link_fd;

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
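/* A user-space sketch (not kernel code) of bpf_link-based attachment handled
 * by cgroup_bpf_link_attach() above; the fds are assumptions of the example:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd	= prog_fd;
 *	attr.link_create.target_fd	= cgroup_fd;
 *	attr.link_create.attach_type	= BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *	// the attachment persists until the last fd of the link is closed
 */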
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
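/* An illustrative cgroup/sock program (not part of this file) of the kind run
 * by __cgroup_bpf_run_filter_sk() above; it denies creation of IPv6 sockets:
 *
 *	SEC("cgroup/sock")
 *	int deny_ipv6(struct bpf_sock *sk)
 *	{
 *		if (sk->family == AF_INET6)
 *			return 0;	// != 1 translates to -EPERM
 *		return 1;		// allow
 *	}
 */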
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
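/* An illustrative cgroup/dev program (not part of this file) matching the
 * context built above: access_type packs the access bits in the upper 16 bits
 * and the device type in the lower 16. This one only allows /dev/null
 * (char 1:3):
 *
 *	SEC("cgroup/dev")
 *	int allow_null(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		if ((ctx->access_type & 0xffff) != BPF_DEVCG_DEV_CHAR)
 *			return 0;
 *		return ctx->major == 1 && ctx->minor == 3;
 *	}
 */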
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
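/* An illustrative cgroup/sysctl program (not part of this file) of the kind
 * run above; it makes every sysctl read-only for tasks in the cgroup:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_ro(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// 0 rejects, 1 allows
 *	}
 */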
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
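/* An illustrative cgroup/setsockopt program (not part of this file): setting
 * ctx->optlen to -1 bypasses the kernel handler as checked above, and
 * returning 0 fails the setsockopt() call with -EPERM:
 *
 *	SEC("cgroup/setsockopt")
 *	int block_ip_opts(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP)
 *			return 0;	// reject
 *		return 1;		// run the kernel handler as usual
 *	}
 */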
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}
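/* Illustrative use from a cgroup/sysctl program (not part of this file):
 *
 *	char name[64];
 *	int len = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *
 * For /proc/sys/net/ipv4/tcp_mem the recursion above contributes "net/ipv4/"
 * and bpf_sysctl_get_name() below appends "tcp_mem", yielding
 * "net/ipv4/tcp_mem"; with BPF_F_SYSCTL_BASE_NAME only "tcp_mem" is copied.
 */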
BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size,
							size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};
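/* An illustrative cgroup/getsockopt program (not part of this file) tying the
 * sockopt machinery together; it hides TCP_CONGESTION from getsockopt()
 * callers in the cgroup by failing the call:
 *
 *	SEC("cgroup/getsockopt")
 *	int hide_cc(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION)
 *			return 0;	// getsockopt() fails with -EPERM
 *		return 1;		// keep the kernel-produced value
 *	}
 */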