// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from cache of the current cpu with irqs disabled.
 * Freeing is always done into bucket of the current cpu as well.
 * irq_work trims extra free elements from buckets with kfree
 * and refills them with kmalloc, so global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with extra 8 bytes that contains
 * struct llist_node.
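 *
 * For example (a hypothetical request, following the math in
 * bpf_mem_cache_idx() and the sizes[] table below): a 40-byte allocation
 * becomes 40 + 8 = 48 bytes once the hidden llist_node is added,
 * size_index[(48 - 1) / 8] - 1 == 4, and sizes[4] == 64, so the request
 * is served from the 64-byte bucket of the current cpu.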
 */
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}

#define NUM_CACHES 11

struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;
	int percpu_size;
	bool draining;
	struct bpf_mem_cache *tgt;

	/* list of objects to be freed after RCU tasks trace GP */
	struct llist_head free_by_rcu_ttrace;
	struct llist_head waiting_for_gp_ttrace;
	struct rcu_head rcu_ttrace;
	atomic_t call_rcu_ttrace_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node, gfp_t flags)
{
	if (c->percpu_size) {
		void **obj = kmalloc_node(c->percpu_size, flags, node);
		void *pptr = __alloc_percpu_gfp(c->unit_size, 8, flags);

		if (!obj || !pptr) {
			free_percpu(pptr);
			kfree(obj);
			return NULL;
		}
		obj[1] = pptr;
		return obj;
	}

	return kmalloc_node(c->unit_size, flags | __GFP_ZERO, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}
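
/* inc_active()/dec_active() below bracket short critical sections on the
 * per-cpu freelist. A sketch of the pattern used throughout this file:
 *
 *	unsigned long flags;
 *
 *	inc_active(c, &flags);
 *	... __llist_add()/__llist_del_first() on c->free_llist ...
 *	dec_active(c, flags);
 */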

static void inc_active(struct bpf_mem_cache *c, unsigned long *flags)
{
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		/* In RT irq_work runs in per-cpu kthread, so disable
		 * interrupts to avoid preemption and interrupts and
		 * reduce the chance of bpf prog executing on this cpu
		 * when active counter is busy.
		 */
		local_irq_save(*flags);
	/* alloc_bulk runs from irq_work which will not preempt a bpf
	 * program that does unit_alloc/unit_free since IRQs are
	 * disabled there. There is no race to increment 'active'
	 * counter. It protects free_llist from corruption in case NMI
	 * bpf prog preempted this loop.
	 */
	WARN_ON_ONCE(local_inc_return(&c->active) != 1);
}

static void dec_active(struct bpf_mem_cache *c, unsigned long flags)
{
	local_dec(&c->active);
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_restore(flags);
}

static void add_obj_to_free_list(struct bpf_mem_cache *c, void *obj)
{
	unsigned long flags;

	inc_active(c, &flags);
	__llist_add(obj, &c->free_llist);
	c->free_cnt++;
	dec_active(c, flags);
}
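
/* Refill order in alloc_bulk() below: reuse elements parked in
 * free_by_rcu_ttrace first, then elements in waiting_for_gp_ttrace, and
 * only then fall back to __alloc(), so the slab allocator is involved
 * only when nothing can be reused.
 */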

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	void *obj;
	int i;

	for (i = 0; i < cnt; i++) {
		/*
		 * For every 'c' llist_del_first(&c->free_by_rcu_ttrace); is
		 * done only by one CPU == current CPU. Other CPUs might
		 * llist_add() and llist_del_all() in parallel.
		 */
		obj = llist_del_first(&c->free_by_rcu_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	for (; i < cnt; i++) {
		obj = llist_del_first(&c->waiting_for_gp_ttrace);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	if (i >= cnt)
		return;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (; i < cnt; i++) {
		/* Allocate, but don't deplete atomic reserves that typical
		 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
		 * will allocate from the current numa node which is what we
		 * want here.
		 */
		obj = __alloc(c, node, GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT);
		if (!obj)
			break;
		add_obj_to_free_list(c, obj);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(void *obj, bool percpu)
{
	if (percpu) {
		free_percpu(((void **)obj)[1]);
		kfree(obj);
		return;
	}

	kfree(obj);
}

static int free_all(struct llist_node *llnode, bool percpu)
{
	struct llist_node *pos, *t;
	int cnt = 0;

	llist_for_each_safe(pos, t, llnode) {
		free_one(pos, percpu);
		cnt++;
	}
	return cnt;
}
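
/* Layout of a per-cpu element, matching __alloc() above and free_one():
 * obj[0] holds the hidden struct llist_node, obj[1] holds the pointer
 * returned by __alloc_percpu_gfp():
 *
 *	void **obj;	[ llist_node | pptr ] --> per-cpu data area
 *
 * which is why free_one() does free_percpu(((void **)obj)[1]) first and
 * then kfree(obj).
 */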

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu_ttrace);

	free_all(llist_del_all(&c->waiting_for_gp_ttrace), !!c->percpu_size);
	atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}

static void __free_rcu_tasks_trace(struct rcu_head *head)
{
	/* If RCU Tasks Trace grace period implies RCU grace period,
	 * there is no need to invoke call_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		__free_rcu(head);
	else
		call_rcu(head, __free_rcu);
}

static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu_ttrace list.
	 */
	llist_add(llnode, &c->free_by_rcu_ttrace);
}

static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_ttrace_in_progress, 1)) {
		if (unlikely(READ_ONCE(c->draining))) {
			llnode = llist_del_all(&c->free_by_rcu_ttrace);
			free_all(llnode, !!c->percpu_size);
		}
		return;
	}

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp_ttrace));
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_by_rcu_ttrace))
		llist_add(llnode, &c->waiting_for_gp_ttrace);

	if (unlikely(READ_ONCE(c->draining))) {
		__free_rcu(&c->rcu_ttrace);
		return;
	}

	/* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
	 * If RCU Tasks Trace grace period implies RCU grace period, free
	 * these elements directly, else use call_rcu() to wait for normal
	 * progs to finish and finally do free_one() on each element.
	 */
	call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct bpf_mem_cache *tgt = c->tgt;
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	WARN_ON_ONCE(tgt->unit_size != c->unit_size);

	do {
		inc_active(c, &flags);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		dec_active(c, flags);
		if (llnode)
			enque_to_free(tgt, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(tgt, llnode);
	do_call_rcu_ttrace(tgt);
}

static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For typical bpf map case that uses bpf_mem_cache_alloc and single bucket
 * the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using below heuristic.
 * Initialized, but unused bpf allocator (not bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * Typical case will be between 11K and 116K closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}

/* When size != 0 allocate a bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct obj_cgroup *objcg = NULL;
	int cpu, i, unit_size, percpu_size = 0;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;

		if (percpu)
			/* room for llist_node and per-cpu pointer */
			percpu_size = LLIST_NODE_SZ + sizeof(void *);
		else
			size += LLIST_NODE_SZ; /* room for llist_node */
		unit_size = size;

#ifdef CONFIG_MEMCG_KMEM
		if (memcg_bpf_enabled())
			objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->unit_size = unit_size;
			c->objcg = objcg;
			c->percpu_size = percpu_size;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	/* size == 0 && percpu is an invalid combination */
	if (WARN_ON_ONCE(percpu))
		return -EINVAL;

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			c->tgt = c;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}
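
/* A hypothetical caller of the two modes of bpf_mem_alloc_init() above
 * (sketch only, error handling elided; 'ma' would normally be embedded
 * in a map or similar long-lived object):
 *
 *	struct bpf_mem_alloc ma;
 *
 *	bpf_mem_alloc_init(&ma, 64, false);	size != 0: one bucket,
 *	p = bpf_mem_cache_alloc(&ma);		all elements 64 bytes
 *	bpf_mem_cache_free(&ma, p);
 *
 *	bpf_mem_alloc_init(&ma, 0, false);	size == 0: 11 buckets,
 *	p = bpf_mem_alloc(&ma, 40);		kmalloc-style sizing
 *	bpf_mem_free(&ma, p);
 *
 *	bpf_mem_alloc_destroy(&ma);
 */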

static void drain_mem_cache(struct bpf_mem_cache *c)
{
	bool percpu = !!c->percpu_size;

	/* No progs are using this bpf_mem_cache, but htab_map_free() called
	 * bpf_mem_cache_free() for all remaining elements and they can be in
	 * free_by_rcu_ttrace or in waiting_for_gp_ttrace lists, so drain those lists now.
	 *
	 * Except for waiting_for_gp_ttrace list, there are no concurrent operations
	 * on these lists, so it is safe to use __llist_del_all().
	 */
	free_all(llist_del_all(&c->free_by_rcu_ttrace), percpu);
	free_all(llist_del_all(&c->waiting_for_gp_ttrace), percpu);
	free_all(__llist_del_all(&c->free_llist), percpu);
	free_all(__llist_del_all(&c->free_llist_extra), percpu);
}

static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
{
	free_percpu(ma->cache);
	free_percpu(ma->caches);
	ma->cache = NULL;
	ma->caches = NULL;
}

static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
	/* The waiting_for_gp_ttrace list was drained, but __free_rcu might
	 * still execute. Wait for it now before freeing the percpu caches.
	 *
	 * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
	 * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
	 * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
	 * so if call_rcu(head, __free_rcu) is skipped due to
	 * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
	 * using rcu_trace_implies_rcu_gp() as well.
	 */
	rcu_barrier_tasks_trace();
	if (!rcu_trace_implies_rcu_gp())
		rcu_barrier();
	free_mem_alloc_no_barrier(ma);
}

static void free_mem_alloc_deferred(struct work_struct *work)
{
	struct bpf_mem_alloc *ma = container_of(work, struct bpf_mem_alloc, work);

	free_mem_alloc(ma);
	kfree(ma);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, int rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path. No callbacks are pending, hence no need to do
		 * rcu_barrier-s.
		 */
		free_mem_alloc_no_barrier(ma);
		return;
	}

	copy = kmemdup(ma, sizeof(*ma), GFP_KERNEL);
	if (!copy) {
		/* Slow path with inline barrier-s */
		free_mem_alloc(ma);
		return;
	}

	/* Defer barriers into worker to let the rest of map memory be freed */
	memset(ma, 0, sizeof(*ma));
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i, rcu_in_progress;

	if (ma->cache) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			WRITE_ONCE(c->draining, true);
			irq_work_sync(&c->refill_work);
			drain_mem_cache(c);
			rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
		}
		/* objcg is the same across cpus */
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
	if (ma->caches) {
		rcu_in_progress = 0;
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				WRITE_ONCE(c->draining, true);
				irq_work_sync(&c->refill_work);
				drain_mem_cache(c);
				rcu_in_progress += atomic_read(&c->call_rcu_ttrace_in_progress);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		destroy_mem_alloc(ma, rcu_in_progress);
	}
}
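
/* Teardown ordering in bpf_mem_alloc_destroy() above: set c->draining so
 * pending RCU callbacks free elements instead of re-parking them, then
 * irq_work_sync() so no refill_work is in flight, then drain the freelists,
 * and finally let destroy_mem_alloc() decide whether rcu_barrier-s are
 * needed based on the number of in-flight call_rcu_tasks_trace() callbacks.
 */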

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode) {
			cnt = --c->free_cnt;
			*(struct bpf_mem_cache **)llnode = c;
		}
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}
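
/* While handed out to a bpf program the hidden llist_node header doubles
 * as an owner hint: unit_alloc() above stored the allocating cache into
 * it ("*(struct bpf_mem_cache **)llnode = c") and unit_free() below reads
 * it back into c->tgt, so free_bulk() can return the object to the bucket
 * it came from even when it is freed on a different cpu.
 */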

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	/*
	 * Remember bpf_mem_cache that allocated this object.
	 * The hint is not accurate.
	 */
	c->tgt = *(struct bpf_mem_cache **)llnode;

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}

/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

/* Directly does a kfree() without putting 'ptr' back to the free_llist
 * for reuse and without waiting for a rcu_tasks_trace gp.
 * The caller must first go through the rcu_tasks_trace gp for 'ptr'
 * before calling bpf_mem_cache_raw_free().
 * It could be used when the rcu_tasks_trace callback does not have
 * a hold on the original bpf_mem_alloc object that allocated the
 * 'ptr'. This should only be used in the uncommon code path.
 * Otherwise, the bpf_mem_alloc's free_llist cannot be refilled
 * and may affect performance.
 */
void bpf_mem_cache_raw_free(void *ptr)
{
	if (!ptr)
		return;

	kfree(ptr - LLIST_NODE_SZ);
}

/* When flags == GFP_KERNEL, it signals that the caller will not cause
 * deadlock when using kmalloc. bpf_mem_cache_alloc_flags() will use
 * kmalloc if the free_llist is empty.
 */
void notrace *bpf_mem_cache_alloc_flags(struct bpf_mem_alloc *ma, gfp_t flags)
{
	struct bpf_mem_cache *c;
	void *ret;

	c = this_cpu_ptr(ma->cache);

	ret = unit_alloc(c);
	if (!ret && flags == GFP_KERNEL) {
		struct mem_cgroup *memcg, *old_memcg;

		memcg = get_memcg(c);
		old_memcg = set_active_memcg(memcg);
		ret = __alloc(c, NUMA_NO_NODE, GFP_KERNEL | __GFP_NOWARN | __GFP_ACCOUNT);
		set_active_memcg(old_memcg);
		mem_cgroup_put(memcg);
	}

	return !ret ? NULL : ret + LLIST_NODE_SZ;
}
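
/* A hypothetical sleepable caller of bpf_mem_cache_alloc_flags() (sketch
 * only; 'ma' is assumed to be initialized with size != 0):
 *
 *	void *p = bpf_mem_cache_alloc_flags(ma, GFP_KERNEL);
 *
 * With GFP_KERNEL the kmalloc fallback may sleep, so unlike
 * bpf_mem_cache_alloc() this variant must not be called from NMI or
 * other atomic context.
 */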