// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless of whether it's
 * through link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}

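/* Illustrative scenario (an editorial sketch, not from the original
 * sources): consider a hierarchy /A/B.  If /A has a program attached
 * with no flags (non-overridable, single-prog), attaching to /B fails
 * with -EPERM.  If /A attached with BPF_F_ALLOW_OVERRIDE, a program
 * may be attached to /B, and it overrides /A's program for tasks in
 * /B.  If /A attached with BPF_F_ALLOW_MULTI, programs may be attached
 * to /B as well, and all of them run.
 */
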
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might still be walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
	/* has to use macro instead of const int, since compiler thinks
	 * that the array below is variable length
	 */
#define NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

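/* Worked example of the effective array (editorial sketch, not from
 * the original sources): for a hierarchy /A/B where /A attached progA
 * with BPF_F_ALLOW_MULTI and /B attached progB, the populate loop in
 * compute_effective_progs() walks from /B towards the root, so B's
 * effective array becomes [progB, progA]: the cgroup's own programs
 * occupy the lowest indices and are run before those of its ancestors
 * by BPF_PROG_RUN_ARRAY().
 */
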
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* OOM while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

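/* Illustrative userspace sketch (editorial addition, not part of this
 * file): replacing one program in multi-prog mode via the bpf(2)
 * syscall.  cgroup_fd, new_fd and old_fd are assumed to have been
 * obtained elsewhere.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd      = cgroup_fd;
 *	attr.attach_bpf_fd  = new_fd;
 *	attr.attach_type    = BPF_CGROUP_INET_INGRESS;
 *	attr.attach_flags   = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
 *	attr.replace_bpf_fd = old_fd;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * Per the checks below, BPF_F_REPLACE without BPF_F_ALLOW_MULTI, or
 * BPF_F_ALLOW_OVERRIDE combined with BPF_F_ALLOW_MULTI, is rejected
 * with -EINVAL.
 */
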
/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: Updated program for the link
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -EINVAL;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

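/* Illustrative userspace sketch (editorial addition, not part of this
 * file): updating the program behind an existing cgroup bpf_link.
 * link_fd, new_fd and old_fd are assumed to have been obtained
 * elsewhere.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_fd;
 *	attr.link_update.flags       = BPF_F_REPLACE;	// optional CAS-style check
 *	attr.link_update.old_prog_fd = old_fd;		// honored only with BPF_F_REPLACE
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * cgroup_bpf_replace() above fails with -EPERM if old_fd no longer
 * matches the link's current program, and with -EINVAL if the cgroup
 * already died and auto-detached the link.
 */
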
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

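/* Illustrative userspace sketch (editorial addition, not part of this
 * file): detaching a program via the bpf(2) syscall.  For multi-prog
 * attachments, attach_bpf_fd must identify the program to remove; in
 * legacy NONE/OVERRIDE mode the fd may be omitted and the single
 * attached program is detached.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd     = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *	err = syscall(__NR_bpf, BPF_PROG_DETACH, &attr, sizeof(attr));
 */
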
/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

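/* Illustrative userspace sketch (editorial addition, not part of this
 * file): listing the effective program IDs for a cgroup.
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids    = (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt    = 64;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *
 * On success attr.query.prog_cnt holds the number of IDs written; per
 * __cgroup_bpf_query() above, -ENOSPC indicates a truncated listing.
 */
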
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.update_prog = cgroup_bpf_replace,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

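/* Illustrative userspace sketch (editorial addition, not part of this
 * file): attaching a program through a bpf_link instead of a direct
 * attachment.
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_SOCK_CREATE;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * attr.link_create.flags must stay zero; cgroup_bpf_link_attach()
 * above always attaches links in BPF_F_ALLOW_MULTI mode.
 */
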
/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS (0)	- continue with packet output
 *   NET_XMIT_DROP    (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN      (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and the
 *                                       sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}

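/* Illustrative BPF program sketch (editorial addition; restricted C,
 * built with clang -target bpf, not part of this file): deny access to
 * the char device 1:1 (/dev/mem), allow everything else.  The field
 * layout matches the bpf_cgroup_dev_ctx construction above, where the
 * low 16 bits of access_type carry the device type.
 *
 *	SEC("cgroup/dev")
 *	int deny_mem(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		if ((ctx->access_type & 0xffff) == BPF_DEVCG_DEV_CHAR &&
 *		    ctx->major == 1 && ctx->minor == 1)
 *			return 0;	// deny
 *		return 1;		// allow
 *	}
 */
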
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto = cgroup_dev_func_proto,
	.is_valid_access = cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}

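/* Illustrative BPF program sketch (editorial addition; restricted C,
 * not part of this file): make every sysctl read-only for tasks in the
 * cgroup, matching the return convention of the runner above.
 *
 *	SEC("cgroup/sysctl")
 *	int deny_sysctl_writes(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;	// 0 rejects with -EPERM
 *	}
 */
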
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}

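/* Illustrative BPF program sketch (editorial addition; restricted C,
 * not part of this file): reject all IP-level setsockopt() calls and
 * pass everything else through unmodified.
 *
 *	SEC("cgroup/setsockopt")
 *	int deny_ip_opts(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP)
 *			return 0;	// setsockopt() fails with EPERM
 *		return 1;		// kernel handler runs as usual
 *	}
 */
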
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs are only allowed to set retval to 0, not to some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

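/* Illustrative helper usage (editorial addition; restricted C, not
 * part of this file): fetch the full sysctl name, e.g.
 * "net/ipv4/tcp_mem", into a stack buffer.
 *
 *	char name[64];
 *	int len = bpf_sysctl_get_name(ctx, name, sizeof(name), 0);
 *
 *	if (len < 0)
 *		return 1;	// name truncated or unavailable
 *
 * With the BPF_F_SYSCTL_BASE_NAME flag only the final component
 * ("tcp_mem") is copied.
 */
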
static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func = bpf_sysctl_get_name,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
	.arg4_type = ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func = bpf_sysctl_get_current_value,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func = bpf_sysctl_get_new_value,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_UNINIT_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func = bpf_sysctl_set_new_value,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_CTX,
	.arg2_type = ARG_PTR_TO_MEM,
	.arg3_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto = sysctl_func_proto,
	.is_valid_access = sysctl_is_valid_access,
	.convert_ctx_access = sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ed. */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto = cg_sockopt_func_proto,
	.is_valid_access = cg_sockopt_is_valid_access,
	.convert_ctx_access = cg_sockopt_convert_ctx_access,
	.gen_prologue = cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};