// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 *		CPU_0 buckets
 *		16 32 64 96 128 196 256 512 1024 2048 4096
 *		...
 *		CPU_N buckets
 *		16 32 64 96 128 196 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Freeing is always done into bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contain
 * struct llist_node.
 */
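
/* Example usage (an illustrative sketch only; 'err', 'obj' and 'struct foo'
 * are hypothetical caller-side names, not part of this allocator):
 *
 *	struct bpf_mem_alloc ma;
 *	struct foo *obj;
 *
 *	err = bpf_mem_alloc_init(&ma, sizeof(struct foo), false);
 *	obj = bpf_mem_cache_alloc(&ma);	// may be NULL when the per-cpu cache is empty
 *	...
 *	bpf_mem_cache_free(&ma, obj);
 *	bpf_mem_alloc_destroy(&ma);
 *
 * With size == 0 at init time all 11 per-cpu buckets are kept and the
 * bpf_mem_alloc(&ma, size)/bpf_mem_free(&ma, ptr) pair picks a bucket from
 * the requested size at runtime (up to 4096 bytes).
 */
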
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}
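
/* Worked examples of the mapping above (indices refer to the 'sizes' array
 * in bpf_mem_alloc_init(): {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096}):
 * - size 20  -> size_index[2] == 4  -> idx 3 -> 32-byte bucket
 * - size 96  -> size_index[11] == 1 -> idx 0 -> 96-byte bucket
 * - size 200 -> fls(199) - 2 == 6   -> idx 6 -> 256-byte bucket
 * bpf_mem_alloc() adds LLIST_NODE_SZ before doing this lookup, so e.g. a
 * 192-byte request lands in the 256-byte bucket.
 */
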
#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_llist from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}
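
/* Object layout notes (derived from __alloc(), free_one() and the wrappers
 * at the bottom of this file):
 * - regular objects: one kmalloc-ed buffer of unit_size bytes; the first
 *   LLIST_NODE_SZ bytes are reserved for the hidden struct llist_node and
 *   the pointer handed to the caller starts right after it.
 * - per-cpu objects (percpu_size != 0): a small kmalloc-ed wrapper of
 *   [ struct llist_node | void __percpu * ], where the second slot points
 *   to the __alloc_percpu_gfp() area of unit_size bytes. free_one()
 *   releases both pieces.
 */
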
static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of bpf prog executing on this cpu
		 * when active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, flags);
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		/*
		 * free_by_rcu_ttrace is only manipulated by irq work refill_work().
		 * IRQ works on the same CPU are called sequentially, so it is
		 * safe to use __llist_del_first() here. If alloc_bulk() is
		 * invoked by the initial prefill, there will be no running
		 * refill_work(), so __llist_del_first() is fine as well.
		 *
		 * In most cases, objects on free_by_rcu_ttrace are from the same CPU.
		 * If some objects come from other CPUs, it doesn't incur any
		 * harm because NUMA_NO_NODE means the preference for current
		 * numa node and it is not a guarantee.
		 */
		obj = __llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	__llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1))
		return;

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu_ttrace))
		/* There is no concurrent __llist_add(waiting_for_gp_ttrace) access.
		 * It doesn't race with llist_del_all either.
		 * But there could be two concurrent llist_del_all(waiting_for_gp_ttrace):
		 * from __free_rcu() and from drain_mem_cache().
		 */
		__llist_add(llnode, &c->waiting_for_gp_ttrace);
	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, flags);
		if (llnode)
			enque_to_free(c, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(c, llnode);
	do_call_rcu_ttrace(c);
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*196 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */
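
/* Where the numbers above come from (they follow from the watermarks set in
 * prefill_mem_cache() below):
 * - unit_size <= 256: low_watermark = 32, high_watermark = 96, so the assumed
 *   average fill is (32 + 96) / 2 == 64 elements, hence the 64*16 .. 64*256 terms.
 * - unit_size == 512: low = 32*256/512 = 16, high = 96*256/512 = 48, average 32.
 * - unit_size == 1024/2048/4096: averages of 16/8/4 respectively.
 * The ~11 Kbyte figure for an unused allocator is the initial prefill of 4
 * objects per bucket up to 256 bytes plus 1 object per larger bucket:
 * 4*(16+32+64+96+128+196+256) + (512+1024+2048+4096) = 10832 bytes per cpu.
 */
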
static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}

/* When size != 0 create a bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}
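
/* For example, with the code above: a non-percpu user passing size == 64 gets
 * unit_size == 72 (64 plus LLIST_NODE_SZ) and 4 prefilled objects per cpu,
 * while percpu == true keeps unit_size == 64 for the per-cpu area and adds a
 * 16-byte (llist_node + pointer, on 64-bit) wrapper object per element.
 */
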
static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(__llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* waiting_for_gp_ttrace lists were drained, but __free_rcu might
	 * still execute. Wait for it now before freeing the percpu caches.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier_tasks_trace();
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			/*
			 * refill_work may be unfinished for PREEMPT_RT kernel
			 * in which irq work is invoked in a per-CPU RT thread.
			 * It is also possible for a kernel with
			 * arch_irq_work_has_interrupt() being false, where irq
			 * work is invoked in the timer interrupt. So wait for
			 * the completion of irq work to ease the handling of
			 * concurrency.
			 */
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_llist access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free a few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}
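
/* The exported wrappers below hide the llist_node header described at the top
 * of this file: the alloc paths return the unit_alloc() result plus
 * LLIST_NODE_SZ, so callers never see the header, and the free paths hand the
 * caller's pointer to unit_free(), which rewinds it by LLIST_NODE_SZ.
 */
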
/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}