// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}
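
/*
 * Note on the helpers above (illustrative): two programs attached to the
 * same cgroup and attach type that share a cgroup storage map resolve to
 * the same storage. cgroup_storage_lookup() finds an existing allocation,
 * so only the first attach allocates, and only those fresh allocations
 * are recorded in new_storages[] for cleanup on error.
 */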

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops the cgroup reference and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless if it's through
 * link or direct prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
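
/*
 * Example of the rule above (illustrative): if /sys/fs/cgroup/a has an
 * ingress prog attached with flags == 0, attaching to /sys/fs/cgroup/a/b
 * fails with -EPERM; with BPF_F_ALLOW_OVERRIDE or BPF_F_ALLOW_MULTI on
 * the parent, the child attach is allowed.
 */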

/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program is yielding
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
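
/*
 * update_effective_progs() below uses a two-phase pattern: first allocate
 * a new effective array for every live descendant (parked in
 * desc->bpf.inactive), and only once all allocations have succeeded swap
 * them in. Either the whole subtree moves to the new state or, on OOM,
 * none of it does.
 */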

static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}
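
/*
 * Attach flag combinations accepted below (illustrative summary of the
 * -EINVAL checks in __cgroup_bpf_attach()):
 *   0                               - one prog, not overridable in subtree
 *   BPF_F_ALLOW_OVERRIDE            - one prog, descendants may override
 *   BPF_F_ALLOW_MULTI               - many progs, all run in sequence
 *   BPF_F_ALLOW_MULTI|BPF_F_REPLACE - swap @replace_prog in place
 * BPF_F_ALLOW_OVERRIDE combined with BPF_F_ALLOW_MULTI is rejected, as is
 * BPF_F_REPLACE without BPF_F_ALLOW_MULTI.
 */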

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @link: A link to attach
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_attach(struct cgroup *cgrp,
			struct bpf_prog *prog, struct bpf_prog *replace_prog,
			struct bpf_cgroup_link *link,
			enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}
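
/*
 * Note the error path above: if update_effective_progs() fails, the old
 * prog (if any) is restored in the list entry and only the storages that
 * were freshly allocated for this attach (new_storage) are freed;
 * pre-existing storages shared with other programs are left alone.
 */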

/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: Updated BPF program to replace the link's current program with
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	bool found = false;

	atype = to_cgroup_bpf_attach_type(link->type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * purge_effective_progs() - After compute_effective_progs fails to alloc new
 *                           cgrp->bpf.inactive table we can recover by
 *                           recomputing the array in place.
 *
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @atype: Type of detach operation
 */
static void purge_effective_progs(struct cgroup *cgrp, struct bpf_prog *prog,
				  struct bpf_cgroup_link *link,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	/* recompute effective prog array in place */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link or prog in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->prog == prog && pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));

		/* Remove the program from the array */
		WARN_ONCE(bpf_prog_array_delete_safe_at(progs, pos),
			  "Failed to purge a prog from array at index %d", pos);
	}
}
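
/*
 * __cgroup_bpf_detach() below cannot fail once the entry has been found:
 * if recomputing the effective arrays hits OOM, it falls back to
 * purge_effective_progs(), which edits the already-published arrays in
 * place instead of allocating new ones.
 */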

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	if (update_effective_progs(cgrp, atype)) {
		/* if update effective array failed replace the prog with a dummy prog */
		pl->prog = old_prog;
		pl->link = link;
		purge_effective_progs(cgrp, old_prog, link, atype);
	}

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;
}

/* Must be called with cgroup_mutex held to avoid races. */
int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
		       union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}
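
/*
 * Query semantics above (illustrative): with BPF_F_QUERY_EFFECTIVE the
 * whole effective array for this attach type is reported, including
 * programs inherited from ancestors; without it, only programs attached
 * directly to @cgrp. When the user buffer is smaller than cnt, the id
 * list is truncated and -ENOSPC returned, with prog_cnt still set to the
 * full count.
 */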

int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}

int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return -EPERM if any
 * attached program was found and if it returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
					    __bpf_prog_run_save_cb);
		ret = (ret == 1 ? 0 : -EPERM);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
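
/*
 * Note on the push/pull pair above: skb->data is temporarily rewound to
 * the network header so the program sees the packet from the IP header
 * on, independent of how far the stack has already pulled it; the
 * original offset and skb->sk are restored before returning.
 */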

/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk, bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       a sockaddr provided by the user
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;
	int ret;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	ret = BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
					  bpf_prog_run, flags);

	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 * sk with connection information (IP addresses, etc.) May not contain
 * cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	int ret;

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
				    bpf_prog_run);
	return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int allow;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	allow = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				      bpf_prog_run);
	rcu_read_unlock();

	return !allow;
}

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};
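
/*
 * Device-permission encoding used above (illustrative): ctx.access_type
 * packs the access mask in the upper 16 bits and the device type
 * (block/char) in the lower 16. A program returns 1 to allow the access,
 * and the !allow inversion turns that into the caller's expected
 * "non-zero means deny" convention.
 */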

/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx, bpf_prog_run);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret == 1 ? 0 : -EPERM;
}
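
/*
 * Outcome of the function above (illustrative): on a sysctl write, if the
 * program updated the new value via bpf_sysctl_set_new_value()
 * (ctx.new_updated), the caller's buffer is swapped for ctx.new_val so
 * the kernel handler sees the program's data; if the program returned
 * != 1, the access is denied with -EPERM.
 */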

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}
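
/*
 * The three helpers above implement a small-buffer optimization: optvals
 * that fit in the on-stack bpf_sockopt_buf are served from buf->data,
 * larger ones (capped at PAGE_SIZE) are kzalloc'ed. sockopt_free_buf()
 * only kfrees the heap case, and sockopt_buf_allocated() tells the two
 * apart for callers that want to hand the buffer off.
 */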

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	release_sock(sk);

	if (!ret) {
		ret = -EPERM;
		goto out;
	}

	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
		ret = -EFAULT;
		goto out;
	}

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

	ret = ctx.retval;

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
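
/*
 * Contract illustrated by the two functions above: programs may rewrite
 * level/optname/optval and shrink or (for setsockopt, within the
 * PAGE_SIZE cap) grow optlen, but retval may only be left as-is or reset
 * to 0; any other retval change is reported to the caller as -EFAULT.
 */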

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.retval = retval,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run);
	if (!ret)
		return -EPERM;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs only allowed to set retval to 0, not some
	 * arbitrary value.
	 */
	if (ctx.retval != 0 && ctx.retval != retval)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ctx.retval;
}
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};
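
/*
 * bpf_sysctl_get_name() above builds the full name recursively via
 * sysctl_cpy_dir(), e.g. "net/ipv4/tcp_mem" for the corresponding table;
 * with BPF_F_SYSCTL_BASE_NAME only the final component ("tcp_mem") is
 * copied. The return value is the total length written, or a negative
 * error such as -E2BIG from strscpy() on truncation.
 */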

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}
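
/*
 * Access rules encoded above: "write" is read-only to programs (narrow
 * reads allowed), while "file_pos" may also be written, but only with a
 * full 4-byte store. The write case is what forces the temporary-register
 * sequence in sysctl_convert_ctx_access() below, since ppos is a pointer
 * that has to be loaded before it can be stored through.
 */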

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also, for stores, an additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};

#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))

static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, retval);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, retval);
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ed.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};