// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_FALSE(cgroup_bpf_enabled_key);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_prog *prog)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype) {
		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			storages[stype] = NULL;
			bpf_cgroup_storages_free(storages);
			return -ENOMEM;
		}
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

static void bpf_cgroup_storages_unlink(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_unlink(storages[stype]);
}
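/* Context for the storage helpers above (editor's sketch, not part of the
 * original file): for_each_cgroup_storage_type() simply iterates over the
 * cgroup storage flavors. The definitions in include/linux/bpf.h look
 * roughly like this; see the header for the authoritative version:
 *
 *	enum bpf_cgroup_storage_type {
 *		BPF_CGROUP_STORAGE_SHARED,
 *		BPF_CGROUP_STORAGE_PERCPU,
 *		__BPF_CGROUP_STORAGE_MAX
 *	};
 *	#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
 *	#define for_each_cgroup_storage_type(stype) \
 *		for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
 */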
/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	unsigned int type;

	mutex_lock(&cgroup_mutex);

	for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
		struct list_head *progs = &cgrp->bpf.progs[type];
		struct bpf_prog_list *pl, *tmp;

		list_for_each_entry_safe(pl, tmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			bpf_cgroup_storages_unlink(pl->storage);
			bpf_cgroup_storages_free(pl->storage);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless of whether it's
 * attached through a link or as a direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum bpf_attach_type type)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[type];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[type]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
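/* Illustration (editor's note, not part of the original file) of how the
 * attach flags compose in a hierarchy. Suppose /A has a prog attached for a
 * given attach type, and an attach to /A/B is requested:
 *
 *	flags on /A's prog		attach to /A/B
 *	(none)				rejected with -EPERM; /A's prog is
 *					non-overridable
 *	BPF_F_ALLOW_OVERRIDE		allowed; /A/B's prog takes precedence
 *					over /A's
 *	BPF_F_ALLOW_MULTI		allowed; both programs run
 */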
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[type]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[type] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[type], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum bpf_attach_type type,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[type], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
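/* Worked example (editor's note, not part of the original file): for cgroups
 * /A -> /A/B with P_A attached to /A and P_B attached to /A/B, both with
 * BPF_F_ALLOW_MULTI, compute_effective_progs(/A/B) counts both levels and
 * fills the array starting at the cgroup itself:
 *
 *	effective[/A/B]->items = { P_B, P_A }
 *
 * BPF_PROG_RUN_ARRAY() later walks items[] from index 0, so a cgroup's own
 * programs are consulted before its ancestors'.
 */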
static int update_effective_progs(struct cgroup *cgrp,
				  enum bpf_attach_type type)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, type, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, type, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct list_head *progs = &cgrp->bpf.progs[type];
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_prog_list *pl;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	if (!hierarchy_allows_attach(cgrp, type))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[type] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, prog ? : link->link.prog))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
		bpf_cgroup_storages_unlink(pl->storage);
		bpf_cgroup_storages_assign(old_storage, pl->storage);
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[type] = saved_flags;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	bpf_cgroup_storages_free(old_storage);
	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(pl->storage);
	bpf_cgroup_storages_assign(pl->storage, old_storage);
	bpf_cgroup_storages_link(pl->storage, cgrp, type);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
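/* Usage sketch (editor's note, user-space side): the checks above correspond
 * to a BPF_PROG_ATTACH command such as the following; the fds are
 * hypothetical:
 *
 *	union bpf_attr attr = {
 *		.target_fd	= cgroup_fd,
 *		.attach_bpf_fd	= prog_fd,
 *		.attach_type	= BPF_CGROUP_INET_EGRESS,
 *		.attach_flags	= BPF_F_ALLOW_MULTI | BPF_F_REPLACE,
 *		.replace_bpf_fd	= old_prog_fd,
 *	};
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *
 * BPF_F_REPLACE without BPF_F_ALLOW_MULTI, or BPF_F_ALLOW_OVERRIDE combined
 * with BPF_F_ALLOW_MULTI, fails the first check above with -EINVAL.
 */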
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum bpf_attach_type type,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[type] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[type];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[type],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @link: A link for which to replace the BPF program
 * @new_prog: The program to attach in place of the link's current one
 *
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
			 struct bpf_prog *new_prog)
{
	struct list_head *progs = &cgrp->bpf.progs[link->type];
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	bool found = false;

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, link->type, link);
	bpf_prog_put(old_prog);
	return 0;
}
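/* Usage sketch (editor's note, user-space side): __cgroup_bpf_replace() is
 * what ultimately services a BPF_LINK_UPDATE command on a cgroup link; the
 * fds are hypothetical:
 *
 *	union bpf_attr attr = {
 *		.link_update.link_fd	 = link_fd,
 *		.link_update.new_prog_fd = new_prog_fd,
 *	};
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 *
 * The new program must have the same bpf_prog_type as the one it replaces,
 * otherwise -EINVAL is returned above.
 */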
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup whose descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_list *pl;
	struct bpf_prog *old_prog;
	int err;

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, type);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	bpf_cgroup_storages_unlink(pl->storage);
	bpf_cgroup_storages_free(pl->storage);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[type] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	struct list_head *progs = &cgrp->bpf.progs[type];
	u32 flags = cgrp->bpf.flags[type];
	struct bpf_prog_array *effective;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;

	effective = rcu_dereference_protected(cgrp->bpf.effective[type],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
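/* Usage sketch (editor's note, user-space side) for the query path above:
 * a two-step BPF_PROG_QUERY, first with no ids buffer to learn the count,
 * then again with a suitably sized buffer. cgroup_fd is hypothetical:
 *
 *	union bpf_attr attr = {
 *		.query.target_fd   = cgroup_fd,
 *		.query.attach_type = BPF_CGROUP_INET_EGRESS,
 *		.query.query_flags = BPF_F_QUERY_EFFECTIVE,
 *	};
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 *	__u32 ids[attr.query.prog_cnt];
 *	attr.query.prog_ids = (__u64)(unsigned long)ids;
 *	syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */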
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	mutex_unlock(&cgroup_mutex);
	cgroup_put(cg_link->cgroup);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_cgroup_link *link;
	struct file *link_file;
	struct cgroup *cgrp;
	int err, link_fd;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	link_file = bpf_link_new_file(&link->link, &link_fd);
	if (IS_ERR(link_file)) {
		kfree(link);
		err = PTR_ERR(link_file);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
				BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link->link, link_file, link_fd);
		goto out_put_cgroup;
	}

	fd_install(link_fd, link_file);
	return link_fd;

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
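/* Usage sketch (editor's note, user-space side) for cgroup_bpf_link_attach():
 * a BPF_LINK_CREATE command. Note that links always attach with
 * BPF_F_ALLOW_MULTI semantics. prog_fd and cgroup_fd are hypothetical:
 *
 *	union bpf_attr attr = {
 *		.link_create.prog_fd	 = prog_fd,
 *		.link_create.target_fd	 = cgroup_fd,
 *		.link_create.attach_type = BPF_CGROUP_INET_INGRESS,
 *	};
 *	int link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 *
 * The returned link_fd pins the attachment: closing its last copy detaches
 * the program via bpf_cgroup_link_release() above.
 */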
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @type: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @type must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS (0)	- continue with packet output
 *   NET_XMIT_DROP    (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN      (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return %-EPERM if any
 * attached program was found and it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum bpf_attach_type type)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (type == BPF_CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[type], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb,
					 __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sk, BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);
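/* Program-side sketch (editor's note, not part of this file): a
 * BPF_PROG_TYPE_CGROUP_SOCK program as run by __cgroup_bpf_run_filter_sk(),
 * e.g. on BPF_CGROUP_INET_SOCK_CREATE. Return 1 to allow socket creation,
 * 0 to reject it with -EPERM:
 *
 *	SEC("cgroup/sock")
 *	int sock_create(struct bpf_sock *sk)
 *	{
 *		if (sk->family == AF_INET6 && sk->type == SOCK_RAW)
 *			return 0;	// no raw IPv6 sockets in this cgroup
 *		return 1;
 *	}
 */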
/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       the sockaddr provided by user space
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @type: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum bpf_attach_type type,
				      void *t_ctx)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @type: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @type must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum bpf_attach_type type)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], sock_ops,
				 BPF_PROG_RUN);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum bpf_attach_type type)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow = 1;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx,
				   BPF_PROG_RUN);
	rcu_read_unlock();

	return !allow;
}
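/* Program-side sketch (editor's note, not part of this file): a
 * BPF_PROG_TYPE_CGROUP_DEVICE program consuming the bpf_cgroup_dev_ctx built
 * above. It denies write access to /dev/null (char 1:3) and allows
 * everything else:
 *
 *	SEC("cgroup/dev")
 *	int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *		short access = ctx->access_type >> 16;
 *
 *		if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 &&
 *		    ctx->minor == 3 && (access & BPF_DEVCG_ACC_WRITE))
 *			return 0;	// deny
 *		return 1;		// allow
 *	}
 */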
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};
/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer passed by user space
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of @new_buf if program set new value, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @new_buf: pointer to pointer to new buffer that will be allocated if program
 *	overrides new value provided by user space on sysctl write
 *	NOTE: it's the caller's responsibility to free *new_buf if it was set
 * @type: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   void __user *buf, size_t *pcount,
				   loff_t *ppos, void **new_buf,
				   enum bpf_attach_type type)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (ctx.cur_val) {
		mm_segment_t old_fs;
		loff_t pos = 0;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
					&ctx.cur_len, &pos)) {
			/* Let BPF program decide how to proceed. */
			ctx.cur_len = 0;
		}
		set_fs(old_fs);
	} else {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (!ctx.new_val ||
		    copy_from_user(ctx.new_val, buf, ctx.new_len))
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], &ctx, BPF_PROG_RUN);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		*new_buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
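/* Program-side sketch (editor's note, not part of this file): a
 * BPF_PROG_TYPE_CGROUP_SYSCTL program run by the function above. It allows
 * all reads and only denies writes it cannot identify; the name buffer size
 * is arbitrary:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_filter(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (!ctx->write)
 *			return 1;	// reads are always allowed
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 0;	// deny: could not resolve the name
 *		// inspect name here; return 0 to deny the write
 *		return 1;
 *	}
 */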
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen)
{
	if (unlikely(max_optlen > PAGE_SIZE) || max_optlen < 0)
		return -EINVAL;

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return 0;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx)
{
	kfree(ctx->optval);
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, *optlen) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_SETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;
		*optlen = ctx.optlen;
		*kernel_optval = ctx.optval;
	}

out:
	if (ret)
		sockopt_free_buf(&ctx);
	return ret;
}
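/* Program-side sketch (editor's note, not part of this file): a
 * BPF_PROG_TYPE_CGROUP_SOCKOPT program for the setsockopt hook above.
 * Returning 1 with optlen set to -1 makes the kernel skip its own handler,
 * matching the "bypass kernel" branch above:
 *
 *	SEC("cgroup/setsockopt")
 *	int sockopt_consume(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_IP && ctx->optname == IP_TOS) {
 *			ctx->optlen = -1;	// consume: bypass kernel
 *			return 1;
 *		}
 *		return 1;	// allow; 0 would fail with -EPERM
 *	}
 */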
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (!cgroup_bpf_enabled ||
	    __cgroup_bpf_prog_array_is_empty(cgrp, BPF_CGROUP_GETSOCKOPT))
		return retval;

	ret = sockopt_alloc_buf(&ctx, max_optlen);
	if (ret)
		return ret;

	ctx.optlen = max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen > max_optlen)
			ctx.optlen = max_optlen;

		if (copy_from_user(ctx.optval, optval, ctx.optlen) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[BPF_CGROUP_GETSOCKOPT],
				 &ctx, BPF_PROG_RUN);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs are only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
	    put_user(ctx.optlen, optlen)) {
		ret = -EFAULT;
		goto out;
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx);
	return ret;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
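/* Illustration (editor's note, not part of the original file): for the
 * sysctl net.ipv4.tcp_mem, bpf_sysctl_get_name() composes the directory
 * part via sysctl_cpy_dir() and yields "net/ipv4/tcp_mem"; with
 * BPF_F_SYSCTL_BASE_NAME set it copies only "tcp_mem".
 */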
static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size,
							size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
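/* Program-side sketch (editor's note, not part of this file): a
 * BPF_PROG_TYPE_CGROUP_SOCKOPT program for the getsockopt hook, exercising
 * the ctx fields validated below. It rewrites the result of a TCP_CONGESTION
 * query; constants and sizes are for illustration:
 *
 *	SEC("cgroup/getsockopt")
 *	int getsockopt_rewrite(struct bpf_sockopt *ctx)
 *	{
 *		if (ctx->level == SOL_TCP && ctx->optname == TCP_CONGESTION) {
 *			// optval/optval_end behave like packet pointers here
 *			if (ctx->optval + 6 <= ctx->optval_end) {
 *				__builtin_memcpy(ctx->optval, "cubic", 6);
 *				ctx->optlen = 6;
 *				ctx->retval = 0;
 *			}
 *		}
 *		return 1;
 *	}
 */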
static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			/* fallthrough */
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};