// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}
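
/* Descriptive note: each attached program owns one bpf_cgroup_storage per
 * storage type enumerated by for_each_cgroup_storage_type() (shared and
 * per-CPU), keyed by (cgroup, attach_type). The helpers above allocate,
 * assign, link and free that whole set as a unit.
 */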

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
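
/* Illustrative example of the rules above for a parent cgroup A and a child
 * cgroup A/B:
 *  - A has a prog attached with no flags: attaching to A/B fails (-EPERM).
 *  - A's prog was attached with BPF_F_ALLOW_OVERRIDE: A/B may attach, and
 *    A/B's prog takes precedence over A's for processes in A/B.
 *  - A's prog was attached with BPF_F_ALLOW_MULTI: A/B may attach, and both
 *    programs end up in A/B's effective array.
 */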

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: Updated BPF program to use for the link
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *    NET_XMIT_SUCCESS    (0)  - continue with packet output
 *    NET_XMIT_DROP       (1)  - drop packet and notify TCP to call cwr
 *    NET_XMIT_CN         (2)  - continue with packet output and notify TCP
 *                               to call cwr
 *    -EPERM                   - drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0.
	 */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
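
/* Illustrative shape of a BPF_PROG_TYPE_CGROUP_SYSCTL program: it can look up
 * the file name via bpf_sysctl_get_name(), check ctx->write, read the current
 * or to-be-written value via bpf_sysctl_get_current_value() and
 * bpf_sysctl_get_new_value(), optionally override a write with
 * bpf_sysctl_set_new_value(), and returns 1 to allow the access or 0 to
 * reject it (mapped to -EPERM above).
 */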

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}
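
/* bpf_sysctl_get_name() copies the '/'-separated path of the sysctl
 * (e.g. "net/ipv4/tcp_mem") into @buf; with BPF_F_SYSCTL_BASE_NAME in @flags
 * only the base name (e.g. "tcp_mem") is copied.
 */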

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};
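
/* Illustrative userspace attach flow (assumes libbpf; not part of this file):
 *
 *	prog_fd = <load a BPF_PROG_TYPE_CGROUP_SKB program>;
 *	cg_fd   = open("/sys/fs/cgroup/foo", O_RDONLY);
 *
 *	// legacy attach, routed through cgroup_bpf_prog_attach() above:
 *	bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
 *
 *	// or link-based attach, routed through cgroup_bpf_link_attach();
 *	// the link auto-detaches when the cgroup is removed:
 *	link_fd = bpf_link_create(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS, NULL);
 */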