// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}
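
/* Note on the helpers above: bpf_cgroup_storages_alloc() first looks
 * each storage up by (cgroup_id, attach_type), so replacing a program
 * on the same cgroup and attach type reuses the existing storage; only
 * genuinely new allocations are recorded in new_storages[], so an
 * error path frees exactly what this call created.
 */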

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops the cgroup reference and marks the bpf_link as defunct by
 * clearing its cgroup pointer. It doesn't free link memory, which will
 * eventually be done by bpf_link's release() callback, when its last
 * FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* Count number of elements in the list.
 * It's slow but the list cannot be long.
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* If parent has a non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * If parent has overridable or multi-prog, allow attaching.
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
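
/* Example of the rule above: with a hierarchy /A -> /A/B, a program
 * attached to /A with neither BPF_F_ALLOW_OVERRIDE nor BPF_F_ALLOW_MULTI
 * makes any attach in /A/B fail with -EPERM. With BPF_F_ALLOW_OVERRIDE,
 * /A/B may attach a program that shadows /A's; with BPF_F_ALLOW_MULTI,
 * both programs stay active (see compute_effective_progs() below).
 */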

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that a parent's BPF_F_ALLOW_OVERRIDE program yields
 * to programs in this cgroup.
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}
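
/* Illustration: for /A/B with BPF_F_ALLOW_MULTI on both levels, B's
 * effective array is laid out as [B's programs in attach order, then
 * A's programs]. BPF_PROG_RUN_ARRAY() walks the array from index 0,
 * so B's own programs execute before its ancestors'.
 */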

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}

static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
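
/* Illustration (userspace side, sketch only): the legacy attach path
 * that ends up here goes through the bpf(2) syscall, roughly:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * Replacing one program among many additionally sets BPF_F_REPLACE in
 * attach_flags and fills attr.replace_bpf_fd.
 */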

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: The program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
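
/* Illustration (userspace side, sketch only): the ->update_prog path
 * above backs BPF_LINK_UPDATE for cgroup links:
 *
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.flags	     = BPF_F_REPLACE;	// optional
 *	attr.link_update.old_prog_fd = old_prog_fd;	// with BPF_F_REPLACE
 *	bpf(BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * With BPF_F_REPLACE the kernel checks that the link still runs the
 * old program before swapping in the new one (-EPERM above otherwise).
 */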

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}
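
/* Illustration: in legacy NONE/OVERRIDE mode, BPF_PROG_DETACH succeeds
 * even with an invalid prog FD (prog == NULL detaches the single
 * attached program; see cgroup_bpf_prog_detach() below). In
 * BPF_F_ALLOW_MULTI mode the exact program or link must be named.
 */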

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
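
/* Illustration (userspace side, sketch only): querying what would run
 * for a cgroup, including inherited programs:
 *
 *	attr.query.target_fd   = cgroup_fd;
 *	attr.query.attach_type = BPF_CGROUP_INET_INGRESS;
 *	attr.query.query_flags = BPF_F_QUERY_EFFECTIVE;
 *	bpf(BPF_PROG_QUERY, &attr, sizeof(attr));
 *
 * With query.prog_cnt == 0 only the count and attach_flags come back;
 * a prog_ids buffer smaller than the count yields -ENOSPC.
 */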

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}
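
/* Illustration (userspace side, sketch only): link-based attachment
 * (cgroup_bpf_link_attach() above) always uses BPF_F_ALLOW_MULTI
 * semantics and auto-detaches when the cgroup dies:
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_SOCK_CREATE;
 *	link_fd = bpf(BPF_LINK_CREATE, &attr, sizeof(attr));
 */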

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
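
/* Illustration (not part of this file): the simplest allow-everything
 * BPF_PROG_TYPE_CGROUP_SKB program is just "return 1;". On ingress, a
 * program returning 0 surfaces as -EPERM in the runners above and the
 * packet is dropped; on egress, the run-array macro maps program
 * verdicts onto the NET_XMIT_* codes documented above.
 */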

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       provided by user sockaddr
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and if it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}
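
/* Illustration: for a device-cgroup check of, say, a write to a block
 * device, ctx.access_type above packs both halves into one word:
 * (BPF_DEVCG_ACC_WRITE << 16) | BPF_DEVCG_DEV_BLOCK. A program splits
 * them back out with "access_type >> 16" and "access_type & 0xffff".
 */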

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void **buf, size_t *pcount, loff_t *ppos,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
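
/* Illustration of the contract above: a BPF_PROG_TYPE_CGROUP_SYSCTL
 * program denies the access by returning 0 (surfaced as -EPERM), allows
 * it by returning 1, and may first rewrite a pending write via
 * bpf_sysctl_set_new_value() (defined below), which sets
 * ctx.new_updated so that ctx.new_val replaces *buf above.
 */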

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			*kernel_optval = ctx.optval;
		}
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
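
/* Illustration of the ctx.optlen contract above: a setsockopt program
 * that returns 1 with optlen == -1 bypasses the kernel handler
 * entirely; optlen == 0 keeps the original user buffer; any other
 * in-bounds optlen makes the kernel consume ctx.optval (exported via
 * *kernel_optval) instead of the user's data.
 */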
1377 */ 1378 if (ctx.optlen != 0) { 1379 *optlen = ctx.optlen; 1380 *kernel_optval = ctx.optval; 1381 } 1382 } 1383 1384 out: 1385 if (ret) 1386 sockopt_free_buf(&ctx); 1387 return ret; 1388 } 1389 1390 int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level, 1391 int optname, char __user *optval, 1392 int __user *optlen, int max_optlen, 1393 int retval) 1394 { 1395 struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data); 1396 struct bpf_sockopt_kern ctx = { 1397 .sk = sk, 1398 .level = level, 1399 .optname = optname, 1400 .retval = retval, 1401 }; 1402 int ret; 1403 1404 /* Opportunistic check to see whether we have any BPF program 1405 * attached to the hook so we don't waste time allocating 1406 * memory and locking the socket. 1407 */ 1408 if (!cgroup_bpf_enabled || 1409 __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT)) 1410 return retval; 1411 1412 ctx.optlen = max_optlen; 1413 1414 max_optlen = sockopt_alloc_buf(&ctx, max_optlen); 1415 if (max_optlen < 0) 1416 return max_optlen; 1417 1418 if (!retval) { 1419 /* If kernel getsockopt finished successfully, 1420 * copy whatever was returned to the user back 1421 * into our temporary buffer. Set optlen to the 1422 * one that kernel returned as well to let 1423 * BPF programs inspect the value. 1424 */ 1425 1426 if (get_user(ctx.optlen, optlen)) { 1427 ret = -EFAULT; 1428 goto out; 1429 } 1430 1431 if (copy_from_user(ctx.optval, optval, 1432 min(ctx.optlen, max_optlen)) != 0) { 1433 ret = -EFAULT; 1434 goto out; 1435 } 1436 } 1437 1438 lock_sock(sk); 1439 ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT], 1440 &ctx, BPF_PROG_RUN); 1441 release_sock(sk); 1442 1443 if (!ret) { 1444 ret = -EPERM; 1445 goto out; 1446 } 1447 1448 if (ctx.optlen > max_optlen) { 1449 ret = -EFAULT; 1450 goto out; 1451 } 1452 1453 /* BPF programs only allowed to set retval to 0, not some 1454 * arbitrary value. 1455 */ 1456 if (ctx.retval != 0 && ctx.retval != retval) { 1457 ret = -EFAULT; 1458 goto out; 1459 } 1460 1461 if (ctx.optlen != 0) { 1462 if (copy_to_user(optval, ctx.optval, ctx.optlen) || 1463 put_user(ctx.optlen, optlen)) { 1464 ret = -EFAULT; 1465 goto out; 1466 } 1467 } 1468 1469 ret = ctx.retval; 1470 1471 out: 1472 sockopt_free_buf(&ctx); 1473 return ret; 1474 } 1475 #endif 1476 1477 static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp, 1478 size_t *lenp) 1479 { 1480 ssize_t tmp_ret = 0, ret; 1481 1482 if (dir->header.parent) { 1483 tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp); 1484 if (tmp_ret < 0) 1485 return tmp_ret; 1486 } 1487 1488 ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp); 1489 if (ret < 0) 1490 return ret; 1491 *bufp += ret; 1492 *lenp -= ret; 1493 ret += tmp_ret; 1494 1495 /* Avoid leading slash. */ 1496 if (!ret) 1497 return ret; 1498 1499 tmp_ret = strscpy(*bufp, "/", *lenp); 1500 if (tmp_ret < 0) 1501 return tmp_ret; 1502 *bufp += tmp_ret; 1503 *lenp -= tmp_ret; 1504 1505 return ret + tmp_ret; 1506 } 1507 1508 BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf, 1509 size_t, buf_len, u64, flags) 1510 { 1511 ssize_t tmp_ret = 0, ret; 1512 1513 if (!buf) 1514 return -EINVAL; 1515 1516 if (!(flags & BPF_F_SYSCTL_BASE_NAME)) { 1517 if (!ctx->head) 1518 return -EINVAL; 1519 tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len); 1520 if (tmp_ret < 0) 1521 return tmp_ret; 1522 } 1523 1524 ret = strscpy(buf, ctx->table->procname, buf_len); 1525 1526 return ret < 0 ? 

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
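
/* Illustration (BPF program side, sketch only): a sysctl filter might
 * fetch the full name and gate writes on it:
 *
 *	char name[64];
 *
 *	if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *		return 0;
 *	// compare name; return 1 to allow the access, 0 to deny it
 */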

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores an additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}
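
/* Illustration: because optval/optval_end are PTR_TO_PACKET and
 * PTR_TO_PACKET_END, a sockopt program must bounds-check before
 * touching the buffer, e.g.:
 *
 *	if (ctx->optval + 1 > ctx->optval_end)
 *		return 1;	// let the kernel handle it
 *	// first byte of optval is now safely accessible
 */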

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ed. */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};