// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in an unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with a per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * struct llist_node.
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc's size_index, but the size == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
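
/* Illustrative mappings (editor's note, not part of the build), derived from
 * the size_index[] table above and the sizes[] array in bpf_mem_alloc_init()
 * below:
 *
 *   bpf_mem_cache_idx(8)    ==  2  ->   16-byte bucket
 *   bpf_mem_cache_idx(24)   ==  3  ->   32-byte bucket
 *   bpf_mem_cache_idx(72)   ==  0  ->   96-byte bucket
 *   bpf_mem_cache_idx(136)  ==  1  ->  192-byte bucket
 *   bpf_mem_cache_idx(193)  ==  6  ->  256-byte bucket (fls(192) - 2 == 6)
 *   bpf_mem_cache_idx(4096) == 10  -> 4096-byte bucket
 *
 * Note the returned index is a position in sizes[] = {96, 192, 16, 32, ...},
 * not a sorted order of bucket sizes.
 */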

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by the per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy, unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU GP */
	struct llist_head free_by_rcu;
	struct llist_node *free_by_rcu_tail;
	struct llist_head waiting_for_gp;
	struct llist_node *waiting_for_gp_tail;
	struct rcu_head rcu;
	atomic_t call_rcu_in_progress;
	struct llist_head free_llist_extra_rcu;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}
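
/* Sketch of the per-cpu element layout produced above (editor's note,
 * inferred from __alloc(), unit_alloc() and free_one()):
 *
 *   obj[0]: 8 bytes used as struct llist_node while the element sits on a
 *           freelist, and as a 'struct bpf_mem_cache *' back-pointer while
 *           the element is allocated (see unit_alloc()/unit_free() below).
 *   obj[1]: pointer returned by __alloc_percpu_gfp(), i.e. the actual
 *           per-cpu data. Callers receive obj + LLIST_NODE_SZ, so this
 *           slot is the first word visible to them.
 */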

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in a per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of a bpf prog executing on this cpu
		 * while the active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment the 'active'
	 * counter. It protects free_llist from corruption in case an NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}
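
/* Editor's sketch of the tasks-trace-RCU free pipeline implemented above
 * (derived from enque_to_free(), do_call_rcu_ttrace() and __free_rcu()):
 *
 *   free_bulk() -> enque_to_free() -> tgt->free_by_rcu_ttrace
 *       -> do_call_rcu_ttrace(): move the list to waiting_for_gp_ttrace and
 *          call_rcu_tasks_trace(__free_rcu_tasks_trace)
 *       -> after the tasks trace GP (plus a regular RCU GP unless
 *          rcu_trace_implies_rcu_gp()): __free_rcu() -> free_one()
 *
 * At most one callback per cache is in flight at a time, serialized by
 * call_rcu_ttrace_in_progress.
 */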

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void __free_by_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode;

	llnode = llist_del_all(&c->waiting_for_gp);
	if (!llnode)
		goto out;

	llist_add_batch(llnode, c->waiting_for_gp_tail, &tgt->free_by_rcu_ttrace);

	/* Objects went through regular RCU GP. Send them to RCU tasks trace */
	do_call_rcu_ttrace(tgt);
out:
	atomic_set(&c->call_rcu_in_progress, 0);
}

static void check_free_by_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;

	/* drain free_llist_extra_rcu */
	if (unlikely(!llist_empty(&c->free_llist_extra_rcu))) {
		inc_active(c, &flags);
		llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra_rcu))
			if (__llist_add(llnode, &c->free_by_rcu))
				c->free_by_rcu_tail = llnode;
		dec_active(c, flags);
	}

	if (llist_empty(&c->free_by_rcu))
		return;

	if (atomic_xchg(&c->call_rcu_in_progress, 1)) {
		/*
		 * Instead of kmalloc-ing a new rcu_head and triggering 10k
		 * call_rcu() to hit rcutree.qhimark and force RCU to notice
		 * the overload, just ask RCU to hurry up. There could be many
		 * objects in the free_by_rcu list.
		 * This hint reduces memory consumption for an artificial
		 * benchmark from 2 Gbyte to 150 Mbyte.
		 */
		rcu_request_urgent_qs_task(current);
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));

	inc_active(c, &flags);
	WRITE_ONCE(c->waiting_for_gp.first, __llist_del_all(&c->free_by_rcu));
	c->waiting_for_gp_tail = c->free_by_rcu_tail;
	dec_active(c, flags);

	if (unlikely(READ_ONCE(c->draining))) {
		free_all(llist_del_all(&c->waiting_for_gp), !!c->percpu_size);
		atomic_set(&c->call_rcu_in_progress, 0);
	} else {
		call_rcu_hurry(&c->rcu, __free_by_rcu);
	}
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);

	check_free_by_rcu(c);
}
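
/* Editor's note on the watermark scheme above: unit_alloc() (defined below)
 * raises the irq_work when free_cnt drops below low_watermark, and
 * unit_free() raises it when free_cnt exceeds high_watermark.
 * bpf_mem_refill() then either kmallocs 'batch' more elements or frees
 * elements down to roughly (high_watermark + low_watermark) / 2, so the
 * steady-state freelist size stays between the two marks.
 */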

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
 * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes,
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets in use, the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using the below heuristic.
 * An initialized, but unused bpf allocator (not a bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * The typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory, assume that the 1st run of a bpf
	 * prog won't be doing more than 4 map_update_elem from an
	 * irq disabled region.
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}
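
/* Editor's summary (illustrative) of what prefill_mem_cache() computes per
 * bucket with 4k pages:
 *
 *   unit_size   low_watermark   high_watermark   batch
 *   <= 256           32               96           48
 *      512           16               48           24
 *     1024            8               24           12
 *     2048            4               12            6
 *     4096            2                6            3
 */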

/* When size != 0 allocate one bpf_mem_cache per cpu.
 * This is the typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is the bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}
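
/* Illustrative usage sketch (editor's addition, not part of the build):
 * how a hypothetical map implementation might use the fixed-size mode of
 * this allocator. 'struct my_elem' is a made-up type.
 *
 *	struct bpf_mem_alloc ma;
 *	struct my_elem *e;
 *	int err;
 *
 *	err = bpf_mem_alloc_init(&ma, sizeof(struct my_elem), false);
 *	if (err)
 *		return err;
 *
 *	// From any context with migration disabled:
 *	e = bpf_mem_cache_alloc(&ma);
 *	if (e) {
 *		// ... use e ...
 *		bpf_mem_cache_free(&ma, e);
 *	}
 *
 *	// When the map is destroyed:
 *	bpf_mem_alloc_destroy(&ma);
 *
 * The size == 0 mode pairs with bpf_mem_alloc()/bpf_mem_free() instead,
 * which pick one of the 11 buckets based on the requested size.
 */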

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
	free_all(__llist_del_all(&c->free_by_rcu), percpu);
	free_all(__llist_del_all(&c->free_llist_extra_rcu), percpu);
	free_all(llist_del_all(&c->waiting_for_gp), percpu);
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
	 * might still execute. Wait for them.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier(); /* wait for __free_by_rcu */
	rcu_barrier_tasks_trace(); /* wait for __free_rcu */
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
				rcu_in_progress += atomic_read(&c->call_rcu_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for the majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use the per-cpu 'active' counter to order free_llist access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}
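
/* Editor's note: unit_alloc() stores the owning cache pointer in the
 * element's 8-byte header (*(struct bpf_mem_cache **)llnode = c) right
 * before handing the object out. unit_free() below reads it back to set
 * c->tgt, so free_bulk() can return the element to the cache that allocated
 * it even when it is freed on another cpu. While the object sits on a
 * freelist, the same 8 bytes serve as the llist_node.
 */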

/* Though the 'ptr' object could have been allocated on a different cpu,
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember the bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to the
		 * atomic llist. free_bulk() will drain it. Though
		 * free_llist_extra is a per-cpu list we have to use atomic
		 * llist_add here, since it also can be interrupted by a bpf
		 * nmi prog that does another unit_free() into the same
		 * free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free a few objects from the current cpu into the global kmalloc pool */
		irq_work_raise(c);
}

static void notrace unit_free_rcu(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;

	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		if (__llist_add(llnode, &c->free_by_rcu))
			c->free_by_rcu_tail = llnode;
	} else {
		llist_add(llnode, &c->free_llist_extra_rcu);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (!atomic_read(&c->call_rcu_in_progress))
		irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace bpf_mem_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free_rcu(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

void notrace bpf_mem_cache_free_rcu(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free_rcu(this_cpu_ptr(ma->cache), ptr);
}
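
/* Editor's note on the two free flavors: bpf_mem_free() makes the element
 * immediately reusable from this cpu's freelist, which is only safe when no
 * other cpu can still be reading it. bpf_mem_free_rcu() routes the element
 * through free_by_rcu/waiting_for_gp, so it is not reused until a regular
 * RCU GP has passed (and not kfree'd back to the slab until a tasks trace
 * RCU GP has also passed), matching what RCU-protected readers and
 * sleepable progs need.
 */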

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for an rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
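
/* Editor's sketch (illustrative, not part of the build): a caller in a
 * sleepable, non-deadlocking context can opt into the kmalloc fallback
 * above instead of failing when the freelist is empty:
 *
 *	void *p = bpf_mem_cache_alloc_flags(&ma, GFP_KERNEL);
 *
 *	if (p) {
 *		// ... use p ...
 *		bpf_mem_cache_free(&ma, p);
 *	}
 *
 * Any flags value other than GFP_KERNEL behaves like bpf_mem_cache_alloc(),
 * i.e. the function only takes from the per-cpu freelist.
 */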