// SPDX-License-Identifier: GPL-2.0-only
/*
 * Functions to manage eBPF programs attached to cgroups
 *
 * Copyright (c) 2016 Daniel Mack
 */

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/cgroup.h>
#include <linux/filter.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

#include "../cgroup/cgroup-internal.h"

DEFINE_STATIC_KEY_ARRAY_FALSE(cgroup_bpf_enabled_key, MAX_CGROUP_BPF_ATTACH_TYPE);
EXPORT_SYMBOL(cgroup_bpf_enabled_key);

void cgroup_bpf_offline(struct cgroup *cgrp)
{
	cgroup_get(cgrp);
	percpu_ref_kill(&cgrp->bpf.refcnt);
}

static void bpf_cgroup_storages_free(struct bpf_cgroup_storage *storages[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storages[stype]);
}

static int bpf_cgroup_storages_alloc(struct bpf_cgroup_storage *storages[],
				     struct bpf_cgroup_storage *new_storages[],
				     enum bpf_attach_type type,
				     struct bpf_prog *prog,
				     struct cgroup *cgrp)
{
	enum bpf_cgroup_storage_type stype;
	struct bpf_cgroup_storage_key key;
	struct bpf_map *map;

	key.cgroup_inode_id = cgroup_id(cgrp);
	key.attach_type = type;

	for_each_cgroup_storage_type(stype) {
		map = prog->aux->cgroup_storage[stype];
		if (!map)
			continue;

		storages[stype] = cgroup_storage_lookup((void *)map, &key, false);
		if (storages[stype])
			continue;

		storages[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storages[stype])) {
			bpf_cgroup_storages_free(new_storages);
			return -ENOMEM;
		}

		new_storages[stype] = storages[stype];
	}

	return 0;
}

static void bpf_cgroup_storages_assign(struct bpf_cgroup_storage *dst[],
				       struct bpf_cgroup_storage *src[])
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		dst[stype] = src[stype];
}

static void bpf_cgroup_storages_link(struct bpf_cgroup_storage *storages[],
				     struct cgroup *cgrp,
				     enum bpf_attach_type attach_type)
{
	enum bpf_cgroup_storage_type stype;

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_link(storages[stype], cgrp, attach_type);
}

/* Called when bpf_cgroup_link is auto-detached from dying cgroup.
 * It drops cgroup and bpf_prog refcounts, and marks bpf_link as defunct. It
 * doesn't free link memory, which will eventually be done by bpf_link's
 * release() callback, when its last FD is closed.
 */
static void bpf_cgroup_link_auto_detach(struct bpf_cgroup_link *link)
{
	cgroup_put(link->cgroup);
	link->cgroup = NULL;
}

/**
 * cgroup_bpf_release() - put references of all bpf programs and
 *                        release all cgroup bpf data
 * @work: work structure embedded into the cgroup to modify
 */
static void cgroup_bpf_release(struct work_struct *work)
{
	struct cgroup *p, *cgrp = container_of(work, struct cgroup,
					       bpf.release_work);
	struct bpf_prog_array *old_array;
	struct list_head *storages = &cgrp->bpf.storages;
	struct bpf_cgroup_storage *storage, *stmp;

	unsigned int atype;

	mutex_lock(&cgroup_mutex);

	for (atype = 0; atype < ARRAY_SIZE(cgrp->bpf.progs); atype++) {
		struct list_head *progs = &cgrp->bpf.progs[atype];
		struct bpf_prog_list *pl, *pltmp;

		list_for_each_entry_safe(pl, pltmp, progs, node) {
			list_del(&pl->node);
			if (pl->prog)
				bpf_prog_put(pl->prog);
			if (pl->link)
				bpf_cgroup_link_auto_detach(pl->link);
			kfree(pl);
			static_branch_dec(&cgroup_bpf_enabled_key[atype]);
		}
		old_array = rcu_dereference_protected(
				cgrp->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		bpf_prog_array_free(old_array);
	}

	list_for_each_entry_safe(storage, stmp, storages, list_cg) {
		bpf_cgroup_storage_unlink(storage);
		bpf_cgroup_storage_free(storage);
	}

	mutex_unlock(&cgroup_mutex);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);
	cgroup_put(cgrp);
}

/**
 * cgroup_bpf_release_fn() - callback used to schedule releasing
 *                           of bpf cgroup data
 * @ref: percpu ref counter structure
 */
static void cgroup_bpf_release_fn(struct percpu_ref *ref)
{
	struct cgroup *cgrp = container_of(ref, struct cgroup, bpf.refcnt);

	INIT_WORK(&cgrp->bpf.release_work, cgroup_bpf_release);
	queue_work(system_wq, &cgrp->bpf.release_work);
}

/* Get underlying bpf_prog of bpf_prog_list entry, regardless of whether it's
 * through a link or a directly attached prog.
 */
static struct bpf_prog *prog_list_prog(struct bpf_prog_list *pl)
{
	if (pl->prog)
		return pl->prog;
	if (pl->link)
		return pl->link->link.prog;
	return NULL;
}

/* count number of elements in the list.
 * it's slow but the list cannot be long
 */
static u32 prog_list_length(struct list_head *head)
{
	struct bpf_prog_list *pl;
	u32 cnt = 0;

	list_for_each_entry(pl, head, node) {
		if (!prog_list_prog(pl))
			continue;
		cnt++;
	}
	return cnt;
}

/* if parent has non-overridable prog attached,
 * disallow attaching new programs to the descendant cgroup.
 * if parent has overridable or multi-prog, allow attaching
 */
static bool hierarchy_allows_attach(struct cgroup *cgrp,
				    enum cgroup_bpf_attach_type atype)
{
	struct cgroup *p;

	p = cgroup_parent(cgrp);
	if (!p)
		return true;
	do {
		u32 flags = p->bpf.flags[atype];
		u32 cnt;

		if (flags & BPF_F_ALLOW_MULTI)
			return true;
		cnt = prog_list_length(&p->bpf.progs[atype]);
		WARN_ON_ONCE(cnt > 1);
		if (cnt == 1)
			return !!(flags & BPF_F_ALLOW_OVERRIDE);
		p = cgroup_parent(p);
	} while (p);
	return true;
}
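/* Editor's walk-through (illustrative sketch, not from the original source):
 * the walk above stops at the first ancestor that either has
 * BPF_F_ALLOW_MULTI set or has a program attached, and that ancestor's
 * flags decide. For a hierarchy root -> child:
 *
 *	root has prog A, no flags:		attach to child fails (-EPERM)
 *	root has prog A, BPF_F_ALLOW_OVERRIDE:	child may attach an overriding prog
 *	root has prog A, BPF_F_ALLOW_MULTI:	child may attach freely
 */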
/* compute a chain of effective programs for a given cgroup:
 * start from the list of programs in this cgroup and add
 * all parent programs.
 * Note that parent's F_ALLOW_OVERRIDE-type program yields
 * to programs in this cgroup
 */
static int compute_effective_progs(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_prog_array **array)
{
	struct bpf_prog_array_item *item;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct cgroup *p = cgrp;
	int cnt = 0;

	/* count number of effective programs by walking parents */
	do {
		if (cnt == 0 || (p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			cnt += prog_list_length(&p->bpf.progs[atype]);
		p = cgroup_parent(p);
	} while (p);

	progs = bpf_prog_array_alloc(cnt, GFP_KERNEL);
	if (!progs)
		return -ENOMEM;

	/* populate the array with effective progs */
	cnt = 0;
	p = cgrp;
	do {
		if (cnt > 0 && !(p->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
			continue;

		list_for_each_entry(pl, &p->bpf.progs[atype], node) {
			if (!prog_list_prog(pl))
				continue;

			item = &progs->items[cnt];
			item->prog = prog_list_prog(pl);
			bpf_cgroup_storages_assign(item->cgroup_storage,
						   pl->storage);
			cnt++;
		}
	} while ((p = cgroup_parent(p)));

	*array = progs;
	return 0;
}

static void activate_effective_progs(struct cgroup *cgrp,
				     enum cgroup_bpf_attach_type atype,
				     struct bpf_prog_array *old_array)
{
	old_array = rcu_replace_pointer(cgrp->bpf.effective[atype], old_array,
					lockdep_is_held(&cgroup_mutex));
	/* free prog array after grace period, since __cgroup_bpf_run_*()
	 * might be still walking the array
	 */
	bpf_prog_array_free(old_array);
}

/**
 * cgroup_bpf_inherit() - inherit effective programs from parent
 * @cgrp: the cgroup to modify
 */
int cgroup_bpf_inherit(struct cgroup *cgrp)
{
/* has to use macro instead of const int, since compiler thinks
 * that array below is variable length
 */
#define	NR ARRAY_SIZE(cgrp->bpf.effective)
	struct bpf_prog_array *arrays[NR] = {};
	struct cgroup *p;
	int ret, i;

	ret = percpu_ref_init(&cgrp->bpf.refcnt, cgroup_bpf_release_fn, 0,
			      GFP_KERNEL);
	if (ret)
		return ret;

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_get(p);

	for (i = 0; i < NR; i++)
		INIT_LIST_HEAD(&cgrp->bpf.progs[i]);

	INIT_LIST_HEAD(&cgrp->bpf.storages);

	for (i = 0; i < NR; i++)
		if (compute_effective_progs(cgrp, i, &arrays[i]))
			goto cleanup;

	for (i = 0; i < NR; i++)
		activate_effective_progs(cgrp, i, arrays[i]);

	return 0;
cleanup:
	for (i = 0; i < NR; i++)
		bpf_prog_array_free(arrays[i]);

	for (p = cgroup_parent(cgrp); p; p = cgroup_parent(p))
		cgroup_bpf_put(p);

	percpu_ref_exit(&cgrp->bpf.refcnt);

	return -ENOMEM;
}
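/* Editor's sketch of the resulting layout (illustration, not part of the
 * original file): given
 *
 *	root:  progs = [A], flags = BPF_F_ALLOW_MULTI
 *	child: progs = [B, C]
 *
 * compute_effective_progs(child) yields effective = [B, C, A]: a cgroup's
 * own programs are placed ahead of those inherited from its ancestors, and
 * an ancestor without BPF_F_ALLOW_MULTI contributes nothing once a
 * descendant has programs of its own.
 */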
static int update_effective_progs(struct cgroup *cgrp,
				  enum cgroup_bpf_attach_type atype)
{
	struct cgroup_subsys_state *css;
	int err;

	/* allocate and recompute effective prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		err = compute_effective_progs(desc, atype, &desc->bpf.inactive);
		if (err)
			goto cleanup;
	}

	/* all allocations were successful. Activate all prog arrays */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt)) {
			if (unlikely(desc->bpf.inactive)) {
				bpf_prog_array_free(desc->bpf.inactive);
				desc->bpf.inactive = NULL;
			}
			continue;
		}

		activate_effective_progs(desc, atype, desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return 0;

cleanup:
	/* oom while computing effective. Free all computed effective arrays
	 * since they were not activated
	 */
	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		bpf_prog_array_free(desc->bpf.inactive);
		desc->bpf.inactive = NULL;
	}

	return err;
}

#define BPF_CGROUP_MAX_PROGS 64

static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       struct bpf_prog *replace_prog,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	/* single-attach case */
	if (!allow_multi) {
		if (list_empty(progs))
			return NULL;
		return list_first_entry(progs, typeof(*pl), node);
	}

	list_for_each_entry(pl, progs, node) {
		if (prog && pl->prog == prog && prog != replace_prog)
			/* disallow attaching the same prog twice */
			return ERR_PTR(-EINVAL);
		if (link && pl->link == link)
			/* disallow attaching the same link twice */
			return ERR_PTR(-EINVAL);
	}

	/* direct prog multi-attach w/ replacement case */
	if (replace_prog) {
		list_for_each_entry(pl, progs, node) {
			if (pl->prog == replace_prog)
				/* a match found */
				return pl;
		}
		/* prog to replace not found for cgroup */
		return ERR_PTR(-ENOENT);
	}

	return NULL;
}

/**
 * __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to attach
 * @link: A link to attach
 * @replace_prog: Previously attached program to replace if BPF_F_REPLACE is set
 * @type: Type of attach operation
 * @flags: Option flags
 *
 * Exactly one of @prog or @link can be non-null.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_attach(struct cgroup *cgrp,
			       struct bpf_prog *prog, struct bpf_prog *replace_prog,
			       struct bpf_cgroup_link *link,
			       enum bpf_attach_type type, u32 flags)
{
	u32 saved_flags = (flags & (BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI));
	struct bpf_prog *old_prog = NULL;
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	struct bpf_cgroup_storage *new_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	int err;

	if (((flags & BPF_F_ALLOW_OVERRIDE) && (flags & BPF_F_ALLOW_MULTI)) ||
	    ((flags & BPF_F_REPLACE) && !(flags & BPF_F_ALLOW_MULTI)))
		/* invalid combination */
		return -EINVAL;
	if (link && (prog || replace_prog))
		/* only either link or prog/replace_prog can be specified */
		return -EINVAL;
	if (!!replace_prog != !!(flags & BPF_F_REPLACE))
		/* replace_prog implies BPF_F_REPLACE, and vice versa */
		return -EINVAL;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (!hierarchy_allows_attach(cgrp, atype))
		return -EPERM;

	if (!list_empty(progs) && cgrp->bpf.flags[atype] != saved_flags)
		/* Disallow attaching non-overridable on top
		 * of existing overridable in this cgroup.
		 * Disallow attaching multi-prog if overridable or none
		 */
		return -EPERM;

	if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
		return -E2BIG;

	pl = find_attach_entry(progs, prog, link, replace_prog,
			       flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	if (bpf_cgroup_storages_alloc(storage, new_storage, type,
				      prog ? : link->link.prog, cgrp))
		return -ENOMEM;

	if (pl) {
		old_prog = pl->prog;
	} else {
		pl = kmalloc(sizeof(*pl), GFP_KERNEL);
		if (!pl) {
			bpf_cgroup_storages_free(new_storage);
			return -ENOMEM;
		}
		list_add_tail(&pl->node, progs);
	}

	pl->prog = prog;
	pl->link = link;
	bpf_cgroup_storages_assign(pl->storage, storage);
	cgrp->bpf.flags[atype] = saved_flags;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	if (old_prog)
		bpf_prog_put(old_prog);
	else
		static_branch_inc(&cgroup_bpf_enabled_key[atype]);
	bpf_cgroup_storages_link(new_storage, cgrp, type);
	return 0;

cleanup:
	if (old_prog) {
		pl->prog = old_prog;
		pl->link = NULL;
	}
	bpf_cgroup_storages_free(new_storage);
	if (!old_prog) {
		list_del(&pl->node);
		kfree(pl);
	}
	return err;
}

static int cgroup_bpf_attach(struct cgroup *cgrp,
			     struct bpf_prog *prog, struct bpf_prog *replace_prog,
			     struct bpf_cgroup_link *link,
			     enum bpf_attach_type type,
			     u32 flags)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_attach(cgrp, prog, replace_prog, link, type, flags);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
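/* Editor's userspace sketch (assumption: cgroup_fd and prog_fd were obtained
 * elsewhere); this is the legacy BPF_PROG_ATTACH path that ends up in
 * cgroup_bpf_attach() above:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.target_fd	   = cgroup_fd;
 *	attr.attach_bpf_fd = prog_fd;
 *	attr.attach_type   = BPF_CGROUP_INET_EGRESS;
 *	attr.attach_flags  = BPF_F_ALLOW_MULTI;
 *	err = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 */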
/* Swap updated BPF program for given link in effective program arrays across
 * all descendant cgroups. This function is guaranteed to succeed.
 */
static void replace_effective_prog(struct cgroup *cgrp,
				   enum cgroup_bpf_attach_type atype,
				   struct bpf_cgroup_link *link)
{
	struct bpf_prog_array_item *item;
	struct cgroup_subsys_state *css;
	struct bpf_prog_array *progs;
	struct bpf_prog_list *pl;
	struct list_head *head;
	struct cgroup *cg;
	int pos;

	css_for_each_descendant_pre(css, &cgrp->self) {
		struct cgroup *desc = container_of(css, struct cgroup, self);

		if (percpu_ref_is_zero(&desc->bpf.refcnt))
			continue;

		/* find position of link in effective progs array */
		for (pos = 0, cg = desc; cg; cg = cgroup_parent(cg)) {
			if (pos && !(cg->bpf.flags[atype] & BPF_F_ALLOW_MULTI))
				continue;

			head = &cg->bpf.progs[atype];
			list_for_each_entry(pl, head, node) {
				if (!prog_list_prog(pl))
					continue;
				if (pl->link == link)
					goto found;
				pos++;
			}
		}
found:
		BUG_ON(!cg);
		progs = rcu_dereference_protected(
				desc->bpf.effective[atype],
				lockdep_is_held(&cgroup_mutex));
		item = &progs->items[pos];
		WRITE_ONCE(item->prog, link->link.prog);
	}
}

/**
 * __cgroup_bpf_replace() - Replace link's program and propagate the change
 *                          to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @link: A link for which to replace BPF program
 * @new_prog: A new program to attach in place of the link's current one
 *
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_replace(struct cgroup *cgrp,
				struct bpf_cgroup_link *link,
				struct bpf_prog *new_prog)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	bool found = false;

	atype = to_cgroup_bpf_attach_type(link->type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];

	if (link->link.prog->type != new_prog->type)
		return -EINVAL;

	list_for_each_entry(pl, progs, node) {
		if (pl->link == link) {
			found = true;
			break;
		}
	}
	if (!found)
		return -ENOENT;

	old_prog = xchg(&link->link.prog, new_prog);
	replace_effective_prog(cgrp, atype, link);
	bpf_prog_put(old_prog);
	return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
			      struct bpf_prog *old_prog)
{
	struct bpf_cgroup_link *cg_link;
	int ret;

	cg_link = container_of(link, struct bpf_cgroup_link, link);

	mutex_lock(&cgroup_mutex);
	/* link might have been auto-released by dying cgroup, so fail */
	if (!cg_link->cgroup) {
		ret = -ENOLINK;
		goto out_unlock;
	}
	if (old_prog && link->prog != old_prog) {
		ret = -EPERM;
		goto out_unlock;
	}
	ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
	mutex_unlock(&cgroup_mutex);
	return ret;
}
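/* Editor's userspace sketch (assumption: fds obtained elsewhere): swapping
 * the program behind an existing cgroup link goes through BPF_LINK_UPDATE
 * and lands in cgroup_bpf_replace() above. BPF_F_REPLACE makes the kernel
 * verify the currently attached program first:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_update.link_fd     = link_fd;
 *	attr.link_update.new_prog_fd = new_prog_fd;
 *	attr.link_update.flags	     = BPF_F_REPLACE;
 *	attr.link_update.old_prog_fd = old_prog_fd;
 *	err = syscall(__NR_bpf, BPF_LINK_UPDATE, &attr, sizeof(attr));
 */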
static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
					       struct bpf_prog *prog,
					       struct bpf_cgroup_link *link,
					       bool allow_multi)
{
	struct bpf_prog_list *pl;

	if (!allow_multi) {
		if (list_empty(progs))
			/* report error when trying to detach and nothing is attached */
			return ERR_PTR(-ENOENT);

		/* to maintain backward compatibility NONE and OVERRIDE cgroups
		 * allow detaching with invalid FD (prog==NULL) in legacy mode
		 */
		return list_first_entry(progs, typeof(*pl), node);
	}

	if (!prog && !link)
		/* to detach MULTI prog the user has to specify valid FD
		 * of the program or link to be detached
		 */
		return ERR_PTR(-EINVAL);

	/* find the prog or link and detach it */
	list_for_each_entry(pl, progs, node) {
		if (pl->prog == prog && pl->link == link)
			return pl;
	}
	return ERR_PTR(-ENOENT);
}

/**
 * __cgroup_bpf_detach() - Detach the program or link from a cgroup, and
 *                         propagate the change to descendants
 * @cgrp: The cgroup which descendants to traverse
 * @prog: A program to detach or NULL
 * @link: A link to detach or NULL
 * @type: Type of detach operation
 *
 * At most one of @prog or @link can be non-NULL.
 * Must be called with cgroup_mutex held.
 */
static int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			       struct bpf_cgroup_link *link, enum bpf_attach_type type)
{
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog *old_prog;
	struct bpf_prog_list *pl;
	struct list_head *progs;
	u32 flags;
	int err;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	if (prog && link)
		/* only one of prog or link can be specified */
		return -EINVAL;

	pl = find_detach_entry(progs, prog, link, flags & BPF_F_ALLOW_MULTI);
	if (IS_ERR(pl))
		return PTR_ERR(pl);

	/* mark it deleted, so it's ignored while recomputing effective */
	old_prog = pl->prog;
	pl->prog = NULL;
	pl->link = NULL;

	err = update_effective_progs(cgrp, atype);
	if (err)
		goto cleanup;

	/* now can actually delete it from this cgroup list */
	list_del(&pl->node);
	kfree(pl);
	if (list_empty(progs))
		/* last program was detached, reset flags to zero */
		cgrp->bpf.flags[atype] = 0;
	if (old_prog)
		bpf_prog_put(old_prog);
	static_branch_dec(&cgroup_bpf_enabled_key[atype]);
	return 0;

cleanup:
	/* restore back prog or link */
	pl->prog = old_prog;
	pl->link = link;
	return err;
}

static int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
			     enum bpf_attach_type type)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_detach(cgrp, prog, NULL, type);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
/* Must be called with cgroup_mutex held to avoid races. */
static int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			      union bpf_attr __user *uattr)
{
	__u32 __user *prog_ids = u64_to_user_ptr(attr->query.prog_ids);
	enum bpf_attach_type type = attr->query.attach_type;
	enum cgroup_bpf_attach_type atype;
	struct bpf_prog_array *effective;
	struct list_head *progs;
	struct bpf_prog *prog;
	int cnt, ret = 0, i;
	u32 flags;

	atype = to_cgroup_bpf_attach_type(type);
	if (atype < 0)
		return -EINVAL;

	progs = &cgrp->bpf.progs[atype];
	flags = cgrp->bpf.flags[atype];

	effective = rcu_dereference_protected(cgrp->bpf.effective[atype],
					      lockdep_is_held(&cgroup_mutex));

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE)
		cnt = bpf_prog_array_length(effective);
	else
		cnt = prog_list_length(progs);

	if (copy_to_user(&uattr->query.attach_flags, &flags, sizeof(flags)))
		return -EFAULT;
	if (copy_to_user(&uattr->query.prog_cnt, &cnt, sizeof(cnt)))
		return -EFAULT;
	if (attr->query.prog_cnt == 0 || !prog_ids || !cnt)
		/* return early if user requested only program count + flags */
		return 0;
	if (attr->query.prog_cnt < cnt) {
		cnt = attr->query.prog_cnt;
		ret = -ENOSPC;
	}

	if (attr->query.query_flags & BPF_F_QUERY_EFFECTIVE) {
		return bpf_prog_array_copy_to_user(effective, prog_ids, cnt);
	} else {
		struct bpf_prog_list *pl;
		u32 id;

		i = 0;
		list_for_each_entry(pl, progs, node) {
			prog = prog_list_prog(pl);
			id = prog->aux->id;
			if (copy_to_user(prog_ids + i, &id, sizeof(id)))
				return -EFAULT;
			if (++i == cnt)
				break;
		}
	}
	return ret;
}

static int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
			    union bpf_attr __user *uattr)
{
	int ret;

	mutex_lock(&cgroup_mutex);
	ret = __cgroup_bpf_query(cgrp, attr, uattr);
	mutex_unlock(&cgroup_mutex);
	return ret;
}
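/* Editor's userspace sketch (assumption: cgroup_fd obtained elsewhere):
 * listing attached programs via BPF_PROG_QUERY, served by
 * cgroup_bpf_query() above. BPF_F_QUERY_EFFECTIVE returns the effective
 * array instead of only the directly attached programs:
 *
 *	__u32 ids[64];
 *	union bpf_attr attr = {};
 *
 *	attr.query.target_fd	= cgroup_fd;
 *	attr.query.attach_type	= BPF_CGROUP_INET_EGRESS;
 *	attr.query.query_flags	= BPF_F_QUERY_EFFECTIVE;
 *	attr.query.prog_ids	= (__u64)(unsigned long)ids;
 *	attr.query.prog_cnt	= 64;
 *	err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
 */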
int cgroup_bpf_prog_attach(const union bpf_attr *attr,
			   enum bpf_prog_type ptype, struct bpf_prog *prog)
{
	struct bpf_prog *replace_prog = NULL;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	if ((attr->attach_flags & BPF_F_ALLOW_MULTI) &&
	    (attr->attach_flags & BPF_F_REPLACE)) {
		replace_prog = bpf_prog_get_type(attr->replace_bpf_fd, ptype);
		if (IS_ERR(replace_prog)) {
			cgroup_put(cgrp);
			return PTR_ERR(replace_prog);
		}
	}

	ret = cgroup_bpf_attach(cgrp, prog, replace_prog, NULL,
				attr->attach_type, attr->attach_flags);

	if (replace_prog)
		bpf_prog_put(replace_prog);
	cgroup_put(cgrp);
	return ret;
}

int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
{
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		prog = NULL;

	ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type);
	if (prog)
		bpf_prog_put(prog);

	cgroup_put(cgrp);
	return ret;
}

static void bpf_cgroup_link_release(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	struct cgroup *cg;

	/* link might have been auto-detached by dying cgroup already,
	 * in that case our work is done here
	 */
	if (!cg_link->cgroup)
		return;

	mutex_lock(&cgroup_mutex);

	/* re-check cgroup under lock again */
	if (!cg_link->cgroup) {
		mutex_unlock(&cgroup_mutex);
		return;
	}

	WARN_ON(__cgroup_bpf_detach(cg_link->cgroup, NULL, cg_link,
				    cg_link->type));

	cg = cg_link->cgroup;
	cg_link->cgroup = NULL;

	mutex_unlock(&cgroup_mutex);

	cgroup_put(cg);
}

static void bpf_cgroup_link_dealloc(struct bpf_link *link)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);

	kfree(cg_link);
}

static int bpf_cgroup_link_detach(struct bpf_link *link)
{
	bpf_cgroup_link_release(link);

	return 0;
}

static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
					struct seq_file *seq)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	seq_printf(seq,
		   "cgroup_id:\t%llu\n"
		   "attach_type:\t%d\n",
		   cg_id,
		   cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
					  struct bpf_link_info *info)
{
	struct bpf_cgroup_link *cg_link =
		container_of(link, struct bpf_cgroup_link, link);
	u64 cg_id = 0;

	mutex_lock(&cgroup_mutex);
	if (cg_link->cgroup)
		cg_id = cgroup_id(cg_link->cgroup);
	mutex_unlock(&cgroup_mutex);

	info->cgroup.cgroup_id = cg_id;
	info->cgroup.attach_type = cg_link->type;
	return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
	.release = bpf_cgroup_link_release,
	.dealloc = bpf_cgroup_link_dealloc,
	.detach = bpf_cgroup_link_detach,
	.update_prog = cgroup_bpf_replace,
	.show_fdinfo = bpf_cgroup_link_show_fdinfo,
	.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
	struct bpf_link_primer link_primer;
	struct bpf_cgroup_link *link;
	struct cgroup *cgrp;
	int err;

	if (attr->link_create.flags)
		return -EINVAL;

	cgrp = cgroup_get_from_fd(attr->link_create.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link) {
		err = -ENOMEM;
		goto out_put_cgroup;
	}
	bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
		      prog);
	link->cgroup = cgrp;
	link->type = attr->link_create.attach_type;

	err = bpf_link_prime(&link->link, &link_primer);
	if (err) {
		kfree(link);
		goto out_put_cgroup;
	}

	err = cgroup_bpf_attach(cgrp, NULL, NULL, link,
				link->type, BPF_F_ALLOW_MULTI);
	if (err) {
		bpf_link_cleanup(&link_primer);
		goto out_put_cgroup;
	}

	return bpf_link_settle(&link_primer);

out_put_cgroup:
	cgroup_put(cgrp);
	return err;
}
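/* Editor's userspace sketch (assumption: fds obtained elsewhere): the
 * link-based attach path that reaches cgroup_bpf_link_attach() above;
 * note that links always attach in BPF_F_ALLOW_MULTI mode:
 *
 *	union bpf_attr attr = {};
 *
 *	attr.link_create.prog_fd     = prog_fd;
 *	attr.link_create.target_fd   = cgroup_fd;
 *	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;
 *	link_fd = syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
 */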
int cgroup_bpf_prog_query(const union bpf_attr *attr,
			  union bpf_attr __user *uattr)
{
	struct cgroup *cgrp;
	int ret;

	cgrp = cgroup_get_from_fd(attr->query.target_fd);
	if (IS_ERR(cgrp))
		return PTR_ERR(cgrp);

	ret = cgroup_bpf_query(cgrp, attr, uattr);

	cgroup_put(cgrp);
	return ret;
}

/**
 * __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
 * @sk: The socket sending or receiving traffic
 * @skb: The skb that is being sent or received
 * @atype: The type of program to be executed
 *
 * If no socket is passed, or the socket is not of type INET or INET6,
 * this function does nothing and returns 0.
 *
 * The program type passed in via @atype must be suitable for network
 * filtering. No further check is performed to assert that.
 *
 * For egress packets, this function can return:
 *   NET_XMIT_SUCCESS    (0)	- continue with packet output
 *   NET_XMIT_DROP       (1)	- drop packet and notify TCP to call cwr
 *   NET_XMIT_CN         (2)	- continue with packet output and notify TCP
 *				  to call cwr
 *   -EPERM			- drop packet
 *
 * For ingress packets, this function will return %-EPERM if any
 * attached program was found and returned != 1 during execution.
 * Otherwise 0 is returned.
 */
int __cgroup_bpf_run_filter_skb(struct sock *sk,
				struct sk_buff *skb,
				enum cgroup_bpf_attach_type atype)
{
	unsigned int offset = skb->data - skb_network_header(skb);
	struct sock *save_sk;
	void *saved_data_end;
	struct cgroup *cgrp;
	int ret;

	if (!sk || !sk_fullsock(sk))
		return 0;

	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	save_sk = skb->sk;
	skb->sk = sk;
	__skb_push(skb, offset);

	/* compute pointers for the bpf prog */
	bpf_compute_and_save_data_end(skb, &saved_data_end);

	if (atype == CGROUP_INET_EGRESS) {
		ret = BPF_PROG_CGROUP_INET_EGRESS_RUN_ARRAY(
			cgrp->bpf.effective[atype], skb, __bpf_prog_run_save_cb);
	} else {
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], skb,
					    __bpf_prog_run_save_cb, 0);
	}
	bpf_restore_data_end(skb, saved_data_end);
	__skb_pull(skb, offset);
	skb->sk = save_sk;

	return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);
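/* Editor's BPF-side sketch (assumption: built separately with clang and
 * libbpf headers, not part of this file): a minimal egress filter whose
 * verdict feeds the return-value mapping documented above; returning 1
 * allows the packet, returning 0 drops it:
 *
 *	SEC("cgroup_skb/egress")
 *	int egress_allow(struct __sk_buff *skb)
 *	{
 *		return 1;
 *	}
 */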
/**
 * __cgroup_bpf_run_filter_sk() - Run a program on a sock
 * @sk: sock structure to manipulate
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sk(struct sock *sk,
			       enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sk,
				     bpf_prog_run, 0);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sk);

/**
 * __cgroup_bpf_run_filter_sock_addr() - Run a program on a sock and
 *                                       provided by user sockaddr
 * @sk: sock struct that will use sockaddr
 * @uaddr: sockaddr struct provided by user
 * @atype: The type of program to be executed
 * @t_ctx: Pointer to attach type specific context
 * @flags: Pointer to u32 which contains higher bits of BPF program
 *         return value (OR'ed together).
 *
 * The socket is expected to be of type INET or INET6.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
				      struct sockaddr *uaddr,
				      enum cgroup_bpf_attach_type atype,
				      void *t_ctx,
				      u32 *flags)
{
	struct bpf_sock_addr_kern ctx = {
		.sk = sk,
		.uaddr = uaddr,
		.t_ctx = t_ctx,
	};
	struct sockaddr_storage unspec;
	struct cgroup *cgrp;

	/* Check socket family since not all sockets represent network
	 * endpoint (e.g. AF_UNIX).
	 */
	if (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)
		return 0;

	if (!ctx.uaddr) {
		memset(&unspec, 0, sizeof(unspec));
		ctx.uaddr = (struct sockaddr *)&unspec;
	}

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	return BPF_PROG_RUN_ARRAY_CG_FLAGS(cgrp->bpf.effective[atype], &ctx,
					   bpf_prog_run, 0, flags);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_addr);

/**
 * __cgroup_bpf_run_filter_sock_ops() - Run a program on a sock
 * @sk: socket to get cgroup from
 * @sock_ops: bpf_sock_ops_kern struct to pass to program. Contains
 *            sk with connection information (IP addresses, etc.) May not
 *            contain cgroup info if it is a req sock.
 * @atype: The type of program to be executed
 *
 * The socket passed is expected to be of type INET or INET6.
 *
 * The program type passed in via @atype must be suitable for sock_ops
 * filtering. No further check is performed to assert that.
 *
 * This function will return %-EPERM if an attached program was found
 * and it returned != 1 during execution. In all other cases, 0 is returned.
 */
int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
				     struct bpf_sock_ops_kern *sock_ops,
				     enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], sock_ops,
				     bpf_prog_run, 0);
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sock_ops);

int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
				      short access, enum cgroup_bpf_attach_type atype)
{
	struct cgroup *cgrp;
	struct bpf_cgroup_dev_ctx ctx = {
		.access_type = (access << 16) | dev_type,
		.major = major,
		.minor = minor,
	};
	int ret;

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				    bpf_prog_run, 0);
	rcu_read_unlock();

	return ret;
}
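/* Editor's BPF-side sketch (assumption: built separately with clang and
 * libbpf headers): a device program sees the bpf_cgroup_dev_ctx built
 * above, with access_type encoded as (access << 16) | dev_type. Returning
 * 0 denies the operation, non-zero allows it. E.g. denying writes to
 * /dev/null (char 1:3):
 *
 *	SEC("cgroup/dev")
 *	int deny_null_write(struct bpf_cgroup_dev_ctx *ctx)
 *	{
 *		short type = ctx->access_type & 0xFFFF;
 *		short access = ctx->access_type >> 16;
 *
 *		if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 &&
 *		    ctx->minor == 3 && (access & BPF_DEVCG_ACC_WRITE))
 *			return 0;
 *		return 1;
 *	}
 */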
static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_uid_gid:
		return &bpf_get_current_uid_gid_proto;
	case BPF_FUNC_get_local_storage:
		return &bpf_get_local_storage_proto;
	case BPF_FUNC_get_current_cgroup_id:
		return &bpf_get_current_cgroup_id_proto;
	case BPF_FUNC_perf_event_output:
		return &bpf_event_output_data_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_func_proto *
cgroup_dev_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	return cgroup_base_func_proto(func_id, prog);
}

static bool cgroup_dev_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (type == BPF_WRITE)
		return false;

	if (off < 0 || off + size > sizeof(struct bpf_cgroup_dev_ctx))
		return false;
	/* The verifier guarantees that size > 0. */
	if (off % size != 0)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
		bpf_ctx_record_field_size(info, size_default);
		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
			return false;
		break;
	default:
		if (size != size_default)
			return false;
	}

	return true;
}

const struct bpf_prog_ops cg_dev_prog_ops = {
};

const struct bpf_verifier_ops cg_dev_verifier_ops = {
	.get_func_proto		= cgroup_dev_func_proto,
	.is_valid_access	= cgroup_dev_is_valid_access,
};
/**
 * __cgroup_bpf_run_filter_sysctl - Run a program on sysctl
 *
 * @head: sysctl table header
 * @table: sysctl table
 * @write: sysctl is being read (= 0) or written (= 1)
 * @buf: pointer to buffer (in and out)
 * @pcount: value-result argument: value is size of buffer pointed to by @buf,
 *	result is size of the new value if the program set one, initial value
 *	otherwise
 * @ppos: value-result argument: value is position at which read from or write
 *	to sysctl is happening, result is new position if program overrode it,
 *	initial value otherwise
 * @atype: type of program to be executed
 *
 * Program is run when sysctl is being accessed, either read or written, and
 * can allow or deny such access.
 *
 * This function will return %-EPERM if an attached program is found and
 * returned value != 1 during execution. In all other cases 0 is returned.
 */
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
				   struct ctl_table *table, int write,
				   char **buf, size_t *pcount, loff_t *ppos,
				   enum cgroup_bpf_attach_type atype)
{
	struct bpf_sysctl_kern ctx = {
		.head = head,
		.table = table,
		.write = write,
		.ppos = ppos,
		.cur_val = NULL,
		.cur_len = PAGE_SIZE,
		.new_val = NULL,
		.new_len = 0,
		.new_updated = 0,
	};
	struct cgroup *cgrp;
	loff_t pos = 0;
	int ret;

	ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
	if (!ctx.cur_val ||
	    table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
		/* Let BPF program decide how to proceed. */
		ctx.cur_len = 0;
	}

	if (write && *buf && *pcount) {
		/* BPF program should be able to override new value with a
		 * buffer bigger than provided by user.
		 */
		ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
		ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
		if (ctx.new_val) {
			memcpy(ctx.new_val, *buf, ctx.new_len);
		} else {
			/* Let BPF program decide how to proceed. */
			ctx.new_len = 0;
		}
	}

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[atype], &ctx,
				    bpf_prog_run, 0);
	rcu_read_unlock();

	kfree(ctx.cur_val);

	if (ret == 1 && ctx.new_updated) {
		kfree(*buf);
		*buf = ctx.new_val;
		*pcount = ctx.new_len;
	} else {
		kfree(ctx.new_val);
	}

	return ret;
}
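/* Editor's BPF-side sketch (assumption: built separately with clang and
 * libbpf headers): the simplest useful sysctl program for the hook above,
 * making all sysctls read-only for tasks in the cgroup; returning 0 is
 * turned into -EPERM by the caller:
 *
 *	SEC("cgroup/sysctl")
 *	int sysctl_read_only(struct bpf_sysctl *ctx)
 *	{
 *		return ctx->write ? 0 : 1;
 *	}
 */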
#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,
					     enum cgroup_bpf_attach_type attach_type)
{
	struct bpf_prog_array *prog_array;
	bool empty;

	rcu_read_lock();
	prog_array = rcu_dereference(cgrp->bpf.effective[attach_type]);
	empty = bpf_prog_array_is_empty(prog_array);
	rcu_read_unlock();

	return empty;
}

static int sockopt_alloc_buf(struct bpf_sockopt_kern *ctx, int max_optlen,
			     struct bpf_sockopt_buf *buf)
{
	if (unlikely(max_optlen < 0))
		return -EINVAL;

	if (unlikely(max_optlen > PAGE_SIZE)) {
		/* We don't expose optvals that are greater than PAGE_SIZE
		 * to the BPF program.
		 */
		max_optlen = PAGE_SIZE;
	}

	if (max_optlen <= sizeof(buf->data)) {
		/* When the optval fits into BPF_SOCKOPT_KERN_BUF_SIZE
		 * bytes avoid the cost of kzalloc.
		 */
		ctx->optval = buf->data;
		ctx->optval_end = ctx->optval + max_optlen;
		return max_optlen;
	}

	ctx->optval = kzalloc(max_optlen, GFP_USER);
	if (!ctx->optval)
		return -ENOMEM;

	ctx->optval_end = ctx->optval + max_optlen;

	return max_optlen;
}

static void sockopt_free_buf(struct bpf_sockopt_kern *ctx,
			     struct bpf_sockopt_buf *buf)
{
	if (ctx->optval == buf->data)
		return;
	kfree(ctx->optval);
}

static bool sockopt_buf_allocated(struct bpf_sockopt_kern *ctx,
				  struct bpf_sockopt_buf *buf)
{
	return ctx->optval != buf->data;
}

int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
				       int *optname, char __user *optval,
				       int *optlen, char **kernel_optval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = *level,
		.optname = *optname,
	};
	int ret, max_optlen;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_SETSOCKOPT))
		return 0;

	/* Allocate a bit more than the initial user buffer for
	 * BPF program. The canonical use case is overriding
	 * TCP_CONGESTION(nv) to TCP_CONGESTION(cubic).
	 */
	max_optlen = max_t(int, 16, *optlen);

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	ctx.optlen = *optlen;

	if (copy_from_user(ctx.optval, optval, min(*optlen, max_optlen)) != 0) {
		ret = -EFAULT;
		goto out;
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_SETSOCKOPT],
				    &ctx, bpf_prog_run, 0);
	release_sock(sk);

	if (ret)
		goto out;

	if (ctx.optlen == -1) {
		/* optlen set to -1, bypass kernel */
		ret = 1;
	} else if (ctx.optlen > max_optlen || ctx.optlen < -1) {
		/* optlen is out of bounds */
		ret = -EFAULT;
	} else {
		/* optlen within bounds, run kernel handler */
		ret = 0;

		/* export any potential modifications */
		*level = ctx.level;
		*optname = ctx.optname;

		/* optlen == 0 from BPF indicates that we should
		 * use original userspace data.
		 */
		if (ctx.optlen != 0) {
			*optlen = ctx.optlen;
			/* We've used bpf_sockopt_kern->buf as an intermediary
			 * storage, but the BPF program indicates that we need
			 * to pass this data to the kernel setsockopt handler.
			 * No way to export on-stack buf, have to allocate a
			 * new buffer.
			 */
			if (!sockopt_buf_allocated(&ctx, &buf)) {
				void *p = kmalloc(ctx.optlen, GFP_USER);

				if (!p) {
					ret = -ENOMEM;
					goto out;
				}
				memcpy(p, ctx.optval, ctx.optlen);
				*kernel_optval = p;
			} else {
				*kernel_optval = ctx.optval;
			}
			/* export and don't free sockopt buf */
			return 0;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}
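/* Editor's BPF-side sketch (assumption: built separately with clang and
 * libbpf headers): the TCP_CONGESTION override mentioned above. The
 * program rewrites the value in ctx->optval and returns 1 so the kernel
 * handler still runs with the modified buffer:
 *
 *	SEC("cgroup/setsockopt")
 *	int override_cc(struct bpf_sockopt *ctx)
 *	{
 *		char cubic[] = "cubic";
 *
 *		if (ctx->level != SOL_TCP || ctx->optname != TCP_CONGESTION)
 *			return 1;
 *		if (ctx->optval + sizeof(cubic) > ctx->optval_end)
 *			return 1;
 *		__builtin_memcpy(ctx->optval, cubic, sizeof(cubic));
 *		ctx->optlen = sizeof(cubic);
 *		return 1;
 *	}
 */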
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
				       int optname, char __user *optval,
				       int __user *optlen, int max_optlen,
				       int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_buf buf = {};
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.current_task = current,
	};
	int ret;

	/* Opportunistic check to see whether we have any BPF program
	 * attached to the hook so we don't waste time allocating
	 * memory and locking the socket.
	 */
	if (__cgroup_bpf_prog_array_is_empty(cgrp, CGROUP_GETSOCKOPT))
		return retval;

	ctx.optlen = max_optlen;

	max_optlen = sockopt_alloc_buf(&ctx, max_optlen, &buf);
	if (max_optlen < 0)
		return max_optlen;

	if (!retval) {
		/* If kernel getsockopt finished successfully,
		 * copy whatever was returned to the user back
		 * into our temporary buffer. Set optlen to the
		 * one that kernel returned as well to let
		 * BPF programs inspect the value.
		 */

		if (get_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}

		if (ctx.optlen < 0) {
			ret = -EFAULT;
			goto out;
		}

		if (copy_from_user(ctx.optval, optval,
				   min(ctx.optlen, max_optlen)) != 0) {
			ret = -EFAULT;
			goto out;
		}
	}

	lock_sock(sk);
	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run, retval);
	release_sock(sk);

	if (ret < 0)
		goto out;

	if (ctx.optlen > max_optlen || ctx.optlen < 0) {
		ret = -EFAULT;
		goto out;
	}

	if (ctx.optlen != 0) {
		if (copy_to_user(optval, ctx.optval, ctx.optlen) ||
		    put_user(ctx.optlen, optlen)) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	sockopt_free_buf(&ctx, &buf);
	return ret;
}

int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
					    int optname, void *optval,
					    int *optlen, int retval)
{
	struct cgroup *cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	struct bpf_sockopt_kern ctx = {
		.sk = sk,
		.level = level,
		.optname = optname,
		.optlen = *optlen,
		.optval = optval,
		.optval_end = optval + *optlen,
		.current_task = current,
	};
	int ret;

	/* Note that __cgroup_bpf_run_filter_getsockopt doesn't copy
	 * user data back into BPF buffer when retval != 0. This is
	 * done as an optimization to avoid extra copy, assuming
	 * kernel won't populate the data in case of an error.
	 * Here we always pass the data and memset() should
	 * be called if that data shouldn't be "exported".
	 */

	ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[CGROUP_GETSOCKOPT],
				    &ctx, bpf_prog_run, retval);
	if (ret < 0)
		return ret;

	if (ctx.optlen > *optlen)
		return -EFAULT;

	/* BPF programs can shrink the buffer, export the modifications.
	 */
	if (ctx.optlen != 0)
		*optlen = ctx.optlen;

	return ret;
}
#endif
static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
			      size_t *lenp)
{
	ssize_t tmp_ret = 0, ret;

	if (dir->header.parent) {
		tmp_ret = sysctl_cpy_dir(dir->header.parent, bufp, lenp);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(*bufp, dir->header.ctl_table[0].procname, *lenp);
	if (ret < 0)
		return ret;
	*bufp += ret;
	*lenp -= ret;
	ret += tmp_ret;

	/* Avoid leading slash. */
	if (!ret)
		return ret;

	tmp_ret = strscpy(*bufp, "/", *lenp);
	if (tmp_ret < 0)
		return tmp_ret;
	*bufp += tmp_ret;
	*lenp -= tmp_ret;

	return ret + tmp_ret;
}

BPF_CALL_4(bpf_sysctl_get_name, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len, u64, flags)
{
	ssize_t tmp_ret = 0, ret;

	if (!buf)
		return -EINVAL;

	if (!(flags & BPF_F_SYSCTL_BASE_NAME)) {
		if (!ctx->head)
			return -EINVAL;
		tmp_ret = sysctl_cpy_dir(ctx->head->parent, &buf, &buf_len);
		if (tmp_ret < 0)
			return tmp_ret;
	}

	ret = strscpy(buf, ctx->table->procname, buf_len);

	return ret < 0 ? ret : tmp_ret + ret;
}

static const struct bpf_func_proto bpf_sysctl_get_name_proto = {
	.func		= bpf_sysctl_get_name,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM,
	.arg3_type	= ARG_CONST_SIZE,
	.arg4_type	= ARG_ANYTHING,
};

static int copy_sysctl_value(char *dst, size_t dst_len, char *src,
			     size_t src_len)
{
	if (!dst)
		return -EINVAL;

	if (!dst_len)
		return -E2BIG;

	if (!src || !src_len) {
		memset(dst, 0, dst_len);
		return -EINVAL;
	}

	memcpy(dst, src, min(dst_len, src_len));

	if (dst_len > src_len) {
		memset(dst + src_len, '\0', dst_len - src_len);
		return src_len;
	}

	dst[dst_len - 1] = '\0';

	return -E2BIG;
}

BPF_CALL_3(bpf_sysctl_get_current_value, struct bpf_sysctl_kern *, ctx,
	   char *, buf, size_t, buf_len)
{
	return copy_sysctl_value(buf, buf_len, ctx->cur_val, ctx->cur_len);
}

static const struct bpf_func_proto bpf_sysctl_get_current_value_proto = {
	.func		= bpf_sysctl_get_current_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_get_new_value, struct bpf_sysctl_kern *, ctx, char *, buf,
	   size_t, buf_len)
{
	if (!ctx->write) {
		if (buf && buf_len)
			memset(buf, '\0', buf_len);
		return -EINVAL;
	}
	return copy_sysctl_value(buf, buf_len, ctx->new_val, ctx->new_len);
}

static const struct bpf_func_proto bpf_sysctl_get_new_value_proto = {
	.func		= bpf_sysctl_get_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
	.arg3_type	= ARG_CONST_SIZE,
};

BPF_CALL_3(bpf_sysctl_set_new_value, struct bpf_sysctl_kern *, ctx,
	   const char *, buf, size_t, buf_len)
{
	if (!ctx->write || !ctx->new_val || !ctx->new_len || !buf || !buf_len)
		return -EINVAL;

	if (buf_len > PAGE_SIZE - 1)
		return -E2BIG;

	memcpy(ctx->new_val, buf, buf_len);
	ctx->new_len = buf_len;
	ctx->new_updated = 1;

	return 0;
}

static const struct bpf_func_proto bpf_sysctl_set_new_value_proto = {
	.func		= bpf_sysctl_set_new_value,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_PTR_TO_MEM | MEM_RDONLY,
	.arg3_type	= ARG_CONST_SIZE,
};

static const struct bpf_func_proto *
sysctl_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_strtol:
		return &bpf_strtol_proto;
	case BPF_FUNC_strtoul:
		return &bpf_strtoul_proto;
	case BPF_FUNC_sysctl_get_name:
		return &bpf_sysctl_get_name_proto;
	case BPF_FUNC_sysctl_get_current_value:
		return &bpf_sysctl_get_current_value_proto;
	case BPF_FUNC_sysctl_get_new_value:
		return &bpf_sysctl_get_new_value_proto;
	case BPF_FUNC_sysctl_set_new_value:
		return &bpf_sysctl_set_new_value_proto;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}
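/* Editor's BPF-side sketch (assumption: built separately with clang and
 * libbpf headers; bpf_printk availability depends on the loader's
 * capabilities): using the helpers wired up in sysctl_func_proto() above
 * to inspect which sysctl is being accessed:
 *
 *	SEC("cgroup/sysctl")
 *	int log_sysctl(struct bpf_sysctl *ctx)
 *	{
 *		char name[64];
 *
 *		if (bpf_sysctl_get_name(ctx, name, sizeof(name), 0) < 0)
 *			return 1;
 *		bpf_printk("sysctl %s, write=%d", name, ctx->write);
 *		return 1;
 *	}
 */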
static bool sysctl_is_valid_access(int off, int size, enum bpf_access_type type,
				   const struct bpf_prog *prog,
				   struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off + size > sizeof(struct bpf_sysctl) || off % size)
		return false;

	switch (off) {
	case bpf_ctx_range(struct bpf_sysctl, write):
		if (type != BPF_READ)
			return false;
		bpf_ctx_record_field_size(info, size_default);
		return bpf_ctx_narrow_access_ok(off, size, size_default);
	case bpf_ctx_range(struct bpf_sysctl, file_pos):
		if (type == BPF_READ) {
			bpf_ctx_record_field_size(info, size_default);
			return bpf_ctx_narrow_access_ok(off, size, size_default);
		} else {
			return size == size_default;
		}
	default:
		return false;
	}
}

static u32 sysctl_convert_ctx_access(enum bpf_access_type type,
				     const struct bpf_insn *si,
				     struct bpf_insn *insn_buf,
				     struct bpf_prog *prog, u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;
	u32 read_size;

	switch (si->off) {
	case offsetof(struct bpf_sysctl, write):
		*insn++ = BPF_LDX_MEM(
			BPF_SIZE(si->code), si->dst_reg, si->src_reg,
			bpf_target_off(struct bpf_sysctl_kern, write,
				       sizeof_field(struct bpf_sysctl_kern,
						    write),
				       target_size));
		break;
	case offsetof(struct bpf_sysctl, file_pos):
		/* ppos is a pointer so it should be accessed via indirect
		 * loads and stores. Also for stores additional temporary
		 * register is used since neither src_reg nor dst_reg can be
		 * overridden.
		 */
		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(
				BPF_DW, si->dst_reg, treg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			*insn++ = BPF_STX_MEM(
				BPF_SIZEOF(u32), treg, si->src_reg,
				bpf_ctx_narrow_access_offset(
					0, sizeof(u32), sizeof(loff_t)));
			*insn++ = BPF_LDX_MEM(
				BPF_DW, treg, si->dst_reg,
				offsetof(struct bpf_sysctl_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(
				BPF_FIELD_SIZEOF(struct bpf_sysctl_kern, ppos),
				si->dst_reg, si->src_reg,
				offsetof(struct bpf_sysctl_kern, ppos));
			read_size = bpf_size_to_bytes(BPF_SIZE(si->code));
			*insn++ = BPF_LDX_MEM(
				BPF_SIZE(si->code), si->dst_reg, si->dst_reg,
				bpf_ctx_narrow_access_offset(
					0, read_size, sizeof(loff_t)));
		}
		*target_size = sizeof(u32);
		break;
	}

	return insn - insn_buf;
}

const struct bpf_verifier_ops cg_sysctl_verifier_ops = {
	.get_func_proto		= sysctl_func_proto,
	.is_valid_access	= sysctl_is_valid_access,
	.convert_ctx_access	= sysctl_convert_ctx_access,
};

const struct bpf_prog_ops cg_sysctl_prog_ops = {
};
#ifdef CONFIG_NET
BPF_CALL_1(bpf_get_netns_cookie_sockopt, struct bpf_sockopt_kern *, ctx)
{
	const struct net *net = ctx ? sock_net(ctx->sk) : &init_net;

	return net->net_cookie;
}

static const struct bpf_func_proto bpf_get_netns_cookie_sockopt_proto = {
	.func		= bpf_get_netns_cookie_sockopt,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX_OR_NULL,
};
#endif

static const struct bpf_func_proto *
cg_sockopt_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
#ifdef CONFIG_NET
	case BPF_FUNC_get_netns_cookie:
		return &bpf_get_netns_cookie_sockopt_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		if (prog->expected_attach_type == BPF_CGROUP_SETSOCKOPT)
			return &bpf_sk_getsockopt_proto;
		return NULL;
#endif
#ifdef CONFIG_INET
	case BPF_FUNC_tcp_sock:
		return &bpf_tcp_sock_proto;
#endif
	default:
		return cgroup_base_func_proto(func_id, prog);
	}
}

static bool cg_sockopt_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	const int size_default = sizeof(__u32);

	if (off < 0 || off >= sizeof(struct bpf_sockopt))
		return false;

	if (off % size != 0)
		return false;

	if (type == BPF_WRITE) {
		switch (off) {
		case offsetof(struct bpf_sockopt, retval):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_GETSOCKOPT;
		case offsetof(struct bpf_sockopt, optname):
			fallthrough;
		case offsetof(struct bpf_sockopt, level):
			if (size != size_default)
				return false;
			return prog->expected_attach_type ==
				BPF_CGROUP_SETSOCKOPT;
		case offsetof(struct bpf_sockopt, optlen):
			return size == size_default;
		default:
			return false;
		}
	}

	switch (off) {
	case offsetof(struct bpf_sockopt, sk):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_SOCKET;
		break;
	case offsetof(struct bpf_sockopt, optval):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET;
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		if (size != sizeof(__u64))
			return false;
		info->reg_type = PTR_TO_PACKET_END;
		break;
	case offsetof(struct bpf_sockopt, retval):
		if (size != size_default)
			return false;
		return prog->expected_attach_type == BPF_CGROUP_GETSOCKOPT;
	default:
		if (size != size_default)
			return false;
		break;
	}
	return true;
}

#define CG_SOCKOPT_ACCESS_FIELD(T, F)					\
	T(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, F),			\
	  si->dst_reg, si->src_reg,					\
	  offsetof(struct bpf_sockopt_kern, F))
static u32 cg_sockopt_convert_ctx_access(enum bpf_access_type type,
					 const struct bpf_insn *si,
					 struct bpf_insn *insn_buf,
					 struct bpf_prog *prog,
					 u32 *target_size)
{
	struct bpf_insn *insn = insn_buf;

	switch (si->off) {
	case offsetof(struct bpf_sockopt, sk):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, sk);
		break;
	case offsetof(struct bpf_sockopt, level):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, level);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, level);
		break;
	case offsetof(struct bpf_sockopt, optname):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optname);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optname);
		break;
	case offsetof(struct bpf_sockopt, optlen):
		if (type == BPF_WRITE)
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_STX_MEM, optlen);
		else
			*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optlen);
		break;
	case offsetof(struct bpf_sockopt, retval):
		BUILD_BUG_ON(offsetof(struct bpf_cg_run_ctx, run_ctx) != 0);

		if (type == BPF_WRITE) {
			int treg = BPF_REG_9;

			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			if (si->src_reg == treg || si->dst_reg == treg)
				--treg;
			*insn++ = BPF_STX_MEM(BPF_DW, si->dst_reg, treg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      treg, treg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_STX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					      treg, si->src_reg,
					      offsetof(struct bpf_cg_run_ctx, retval));
			*insn++ = BPF_LDX_MEM(BPF_DW, treg, si->dst_reg,
					      offsetof(struct bpf_sockopt_kern, tmp_reg));
		} else {
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_sockopt_kern, current_task),
					      si->dst_reg, si->src_reg,
					      offsetof(struct bpf_sockopt_kern, current_task));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct task_struct, bpf_ctx),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct task_struct, bpf_ctx));
			*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_cg_run_ctx, retval),
					      si->dst_reg, si->dst_reg,
					      offsetof(struct bpf_cg_run_ctx, retval));
		}
		break;
	case offsetof(struct bpf_sockopt, optval):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval);
		break;
	case offsetof(struct bpf_sockopt, optval_end):
		*insn++ = CG_SOCKOPT_ACCESS_FIELD(BPF_LDX_MEM, optval_end);
		break;
	}

	return insn - insn_buf;
}

static int cg_sockopt_get_prologue(struct bpf_insn *insn_buf,
				   bool direct_write,
				   const struct bpf_prog *prog)
{
	/* Nothing to do for sockopt argument. The data is kzalloc'ated.
	 */
	return 0;
}

const struct bpf_verifier_ops cg_sockopt_verifier_ops = {
	.get_func_proto		= cg_sockopt_func_proto,
	.is_valid_access	= cg_sockopt_is_valid_access,
	.convert_ctx_access	= cg_sockopt_convert_ctx_access,
	.gen_prologue		= cg_sockopt_get_prologue,
};

const struct bpf_prog_ops cg_sockopt_prog_ops = {
};