// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */
#include <linux/mm.h>
#include <linux/llist.h>
#include <linux/bpf.h>
#include <linux/irq_work.h>
#include <linux/bpf_mem_alloc.h>
#include <linux/memcontrol.h>
#include <asm/local.h>

/* Any context (including NMI) BPF specific memory allocator.
 *
 * Tracing BPF programs can attach to kprobe and fentry. Hence they
 * run in unknown context where calling plain kmalloc() might not be safe.
 *
 * Front-end kmalloc() with a per-cpu per-bucket cache of free elements.
 * Refill this cache asynchronously from irq_work.
 *
 * CPU_0 buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 * ...
 * CPU_N buckets
 * 16 32 64 96 128 192 256 512 1024 2048 4096
 *
 * The buckets are prefilled at the start.
 * BPF programs always run with migration disabled.
 * It's safe to allocate from the cache of the current cpu with irqs disabled.
 * Freeing is always done into the bucket of the current cpu as well.
 * irq_work trims extra free elements from the buckets with kfree
 * and refills them with kmalloc, so the global kmalloc logic takes care
 * of freeing objects allocated by one cpu and freed on another.
 *
 * Every allocated object is padded with 8 extra bytes that contain a
 * struct llist_node.
 */
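/* A minimal usage sketch (not part of this file) of the API implemented
 * below, assuming a map-like owner; 'my_map' and 'my_elem' are hypothetical
 * names. The owner initializes one allocator, allocates and frees elements
 * from any context with migration disabled, and destroys the allocator on
 * teardown:
 *
 *	struct my_map {
 *		struct bpf_mem_alloc ma;
 *	};
 *
 *	// setup: fixed element size, kmem_cache backed
 *	err = bpf_mem_alloc_init(&my_map->ma, sizeof(struct my_elem));
 *
 *	// update path (any context, including NMI):
 *	elem = bpf_mem_cache_alloc(&my_map->ma);
 *	if (elem)
 *		... use elem ...
 *	bpf_mem_cache_free(&my_map->ma, elem);
 *
 *	// teardown, once no program can reach the map:
 *	bpf_mem_alloc_destroy(&my_map->ma);
 */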
#define LLIST_NODE_SZ sizeof(struct llist_node)

/* similar to kmalloc, but sizeof == 8 bucket is gone */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	3,	/* 16 */
	4,	/* 24 */
	4,	/* 32 */
	5,	/* 40 */
	5,	/* 48 */
	5,	/* 56 */
	5,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	6,	/* 104 */
	6,	/* 112 */
	6,	/* 120 */
	6,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static int bpf_mem_cache_idx(size_t size)
{
	if (!size || size > 4096)
		return -1;

	if (size <= 192)
		return size_index[(size - 1) / 8] - 1;

	return fls(size - 1) - 2;
}

#define NUM_CACHES 11
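/* Worked examples of the size -> bucket mapping (the sizes[] table lives in
 * bpf_mem_alloc_init() below):
 *   size   16: size_index[1]  == 3 -> idx 2  -> 16 byte bucket
 *   size   72: size_index[8]  == 1 -> idx 0  -> 96 byte bucket
 *   size  192: size_index[23] == 2 -> idx 1  -> 192 byte bucket
 *   size  200: fls(199) - 2   == 6 -> idx 6  -> 256 byte bucket
 *   size 4096: fls(4095) - 2  == 10 -> idx 10 -> 4096 byte bucket
 */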
struct bpf_mem_cache {
	/* per-cpu list of free objects of size 'unit_size'.
	 * All accesses are done with interrupts disabled and 'active' counter
	 * protection with __llist_add() and __llist_del_first().
	 */
	struct llist_head free_llist;
	local_t active;

	/* Operations on the free_list from unit_alloc/unit_free/bpf_mem_refill
	 * are sequenced by per-cpu 'active' counter. But unit_free() cannot
	 * fail. When 'active' is busy the unit_free() will add an object to
	 * free_llist_extra.
	 */
	struct llist_head free_llist_extra;

	/* kmem_cache != NULL when bpf_mem_alloc was created for specific
	 * element size.
	 */
	struct kmem_cache *kmem_cache;
	struct irq_work refill_work;
	struct obj_cgroup *objcg;
	int unit_size;
	/* count of objects in free_llist */
	int free_cnt;
	int low_watermark, high_watermark, batch;

	struct rcu_head rcu;
	struct llist_head free_by_rcu;
	struct llist_head waiting_for_gp;
	atomic_t call_rcu_in_progress;
};

struct bpf_mem_caches {
	struct bpf_mem_cache cache[NUM_CACHES];
};

static struct llist_node notrace *__llist_del_first(struct llist_head *head)
{
	struct llist_node *entry, *next;

	entry = head->first;
	if (!entry)
		return NULL;
	next = entry->next;
	head->first = next;
	return entry;
}

static void *__alloc(struct bpf_mem_cache *c, int node)
{
	/* Allocate, but don't deplete atomic reserves that typical
	 * GFP_ATOMIC would do. irq_work runs on this cpu and kmalloc
	 * will allocate from the current numa node which is what we
	 * want here.
	 */
	gfp_t flags = GFP_NOWAIT | __GFP_NOWARN | __GFP_ACCOUNT;

	if (c->kmem_cache)
		return kmem_cache_alloc_node(c->kmem_cache, flags, node);

	return kmalloc_node(c->unit_size, flags, node);
}

static struct mem_cgroup *get_memcg(const struct bpf_mem_cache *c)
{
#ifdef CONFIG_MEMCG_KMEM
	if (c->objcg)
		return get_mem_cgroup_from_objcg(c->objcg);
#endif

#ifdef CONFIG_MEMCG
	return root_mem_cgroup;
#else
	return NULL;
#endif
}

/* Mostly runs from irq_work except __init phase. */
static void alloc_bulk(struct bpf_mem_cache *c, int cnt, int node)
{
	struct mem_cgroup *memcg = NULL, *old_memcg;
	unsigned long flags;
	void *obj;
	int i;

	memcg = get_memcg(c);
	old_memcg = set_active_memcg(memcg);
	for (i = 0; i < cnt; i++) {
		obj = __alloc(c, node);
		if (!obj)
			break;
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			/* In RT irq_work runs in per-cpu kthread, so disable
			 * interrupts to avoid preemption and interrupts and
			 * reduce the chance of bpf prog executing on this cpu
			 * when active counter is busy.
			 */
			local_irq_save(flags);
		/* alloc_bulk runs from irq_work which will not preempt a bpf
		 * program that does unit_alloc/unit_free since IRQs are
		 * disabled there. There is no race to increment 'active'
		 * counter. It protects free_llist from corruption in case NMI
		 * bpf prog preempted this loop.
		 */
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		__llist_add(obj, &c->free_llist);
		c->free_cnt++;
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
	}
	set_active_memcg(old_memcg);
	mem_cgroup_put(memcg);
}

static void free_one(struct bpf_mem_cache *c, void *obj)
{
	if (c->kmem_cache)
		kmem_cache_free(c->kmem_cache, obj);
	else
		kfree(obj);
}

static void __free_rcu(struct rcu_head *head)
{
	struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
	struct llist_node *llnode = llist_del_all(&c->waiting_for_gp);
	struct llist_node *pos, *t;

	llist_for_each_safe(pos, t, llnode)
		free_one(c, pos);
	atomic_set(&c->call_rcu_in_progress, 0);
}
static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
	struct llist_node *llnode = obj;

	/* bpf_mem_cache is a per-cpu object. Freeing happens in irq_work.
	 * Nothing races to add to free_by_rcu list.
	 */
	__llist_add(llnode, &c->free_by_rcu);
}

static void do_call_rcu(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	if (atomic_xchg(&c->call_rcu_in_progress, 1))
		return;

	WARN_ON_ONCE(!llist_empty(&c->waiting_for_gp));
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		/* There is no concurrent __llist_add(waiting_for_gp) access.
		 * It doesn't race with llist_del_all either.
		 * But there could be two concurrent llist_del_all(waiting_for_gp):
		 * from __free_rcu() and from drain_mem_cache().
		 */
		__llist_add(llnode, &c->waiting_for_gp);
	call_rcu(&c->rcu, __free_rcu);
}

static void free_bulk(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;
	unsigned long flags;
	int cnt;

	do {
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_save(flags);
		WARN_ON_ONCE(local_inc_return(&c->active) != 1);
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
		else
			cnt = 0;
		local_dec(&c->active);
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			local_irq_restore(flags);
		/* free_llist can be empty here (drained by unit_alloc between
		 * irq_work_raise and this run). __llist_add() cannot take NULL,
		 * so only enqueue real objects.
		 */
		if (llnode)
			enque_to_free(c, llnode);
	} while (cnt > (c->high_watermark + c->low_watermark) / 2);

	/* and drain free_llist_extra */
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		enque_to_free(c, llnode);
	do_call_rcu(c);
}
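/* Life of a freed object with the batching above: unit_free() pushes it onto
 * free_llist (or free_llist_extra), free_bulk() moves it to free_by_rcu,
 * do_call_rcu() moves the whole batch to waiting_for_gp and arms a single
 * call_rcu(), and after the grace period __free_rcu() hands it back to
 * kfree()/kmem_cache_free(). At most one RCU callback per bpf_mem_cache is
 * in flight at a time, tracked by call_rcu_in_progress.
 */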
static void bpf_mem_refill(struct irq_work *work)
{
	struct bpf_mem_cache *c = container_of(work, struct bpf_mem_cache, refill_work);
	int cnt;

	/* Racy access to free_cnt. It doesn't need to be 100% accurate */
	cnt = c->free_cnt;
	if (cnt < c->low_watermark)
		/* irq_work runs on this cpu and kmalloc will allocate
		 * from the current numa node which is what we want here.
		 */
		alloc_bulk(c, c->batch, NUMA_NO_NODE);
	else if (cnt > c->high_watermark)
		free_bulk(c);
}

static void notrace irq_work_raise(struct bpf_mem_cache *c)
{
	irq_work_queue(&c->refill_work);
}

/* For the typical bpf map case that uses bpf_mem_cache_alloc and a single
 * bucket, the freelist cache will be elem_size * 64 (or less) on each cpu.
 *
 * For bpf programs that don't have statically known allocation sizes and
 * assuming (low_mark + high_mark) / 2 as an average number of elements per
 * bucket and all buckets are used, the total amount of memory in freelists
 * on each cpu will be:
 * 64*16 + 64*32 + 64*64 + 64*96 + 64*128 + 64*192 + 64*256 + 32*512 + 16*1024 + 8*2048 + 4*4096
 * == ~ 116 Kbyte using the below heuristic.
 * An initialized but unused bpf allocator (not a bpf map specific one) will
 * consume ~ 11 Kbyte per cpu.
 * The typical case will be between 11K and 116K, closer to 11K.
 * bpf progs can and should share bpf_mem_cache when possible.
 */

static void prefill_mem_cache(struct bpf_mem_cache *c, int cpu)
{
	init_irq_work(&c->refill_work, bpf_mem_refill);
	if (c->unit_size <= 256) {
		c->low_watermark = 32;
		c->high_watermark = 96;
	} else {
		/* When page_size == 4k, order-0 cache will have low_mark == 2
		 * and high_mark == 6 with batch alloc of 3 individual pages at
		 * a time.
		 * 8k allocs and above low == 1, high == 3, batch == 1.
		 */
		c->low_watermark = max(32 * 256 / c->unit_size, 1);
		c->high_watermark = max(96 * 256 / c->unit_size, 3);
	}
	c->batch = max((c->high_watermark - c->low_watermark) / 4 * 3, 1);

	/* To avoid consuming memory assume that 1st run of bpf
	 * prog won't be doing more than 4 map_update_elem from
	 * irq disabled region
	 */
	alloc_bulk(c, c->unit_size <= 256 ? 4 : 1, cpu_to_node(cpu));
}
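/* Worked examples of the watermark heuristic above, following directly from
 * the formulas in prefill_mem_cache():
 *   unit_size  256: low = 32, high = 96, batch = 48
 *   unit_size  512: low = 16, high = 48, batch = 24
 *   unit_size 4096: low =  2, high =  6, batch =  3
 */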
/* When size != 0 create kmem_cache and bpf_mem_cache for each cpu.
 * This is typical bpf hash map use case when all elements have equal size.
 *
 * When size == 0 allocate 11 bpf_mem_cache-s for each cpu, then rely on
 * kmalloc/kfree. Max allocation size is 4096 in this case.
 * This is bpf_dynptr and bpf_kptr use case.
 */
int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size)
{
	static u16 sizes[NUM_CACHES] = {96, 192, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096};
	struct bpf_mem_caches *cc, __percpu *pcc;
	struct bpf_mem_cache *c, __percpu *pc;
	struct kmem_cache *kmem_cache;
	struct obj_cgroup *objcg = NULL;
	char buf[32];
	int cpu, i;

	if (size) {
		pc = __alloc_percpu_gfp(sizeof(*pc), 8, GFP_KERNEL);
		if (!pc)
			return -ENOMEM;
		size += LLIST_NODE_SZ; /* room for llist_node */
		snprintf(buf, sizeof(buf), "bpf-%u", size);
		kmem_cache = kmem_cache_create(buf, size, 8, 0, NULL);
		if (!kmem_cache) {
			free_percpu(pc);
			return -ENOMEM;
		}
#ifdef CONFIG_MEMCG_KMEM
		objcg = get_obj_cgroup_from_current();
#endif
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(pc, cpu);
			c->kmem_cache = kmem_cache;
			c->unit_size = size;
			c->objcg = objcg;
			prefill_mem_cache(c, cpu);
		}
		ma->cache = pc;
		return 0;
	}

	pcc = __alloc_percpu_gfp(sizeof(*cc), 8, GFP_KERNEL);
	if (!pcc)
		return -ENOMEM;
#ifdef CONFIG_MEMCG_KMEM
	objcg = get_obj_cgroup_from_current();
#endif
	for_each_possible_cpu(cpu) {
		cc = per_cpu_ptr(pcc, cpu);
		for (i = 0; i < NUM_CACHES; i++) {
			c = &cc->cache[i];
			c->unit_size = sizes[i];
			c->objcg = objcg;
			prefill_mem_cache(c, cpu);
		}
	}
	ma->caches = pcc;
	return 0;
}
static void drain_mem_cache(struct bpf_mem_cache *c)
{
	struct llist_node *llnode, *t;

	/* The caller has done rcu_barrier() and no progs are using this
	 * bpf_mem_cache, but htab_map_free() called bpf_mem_cache_free() for
	 * all remaining elements and they can be in free_by_rcu or in
	 * waiting_for_gp lists, so drain those lists now.
	 */
	llist_for_each_safe(llnode, t, __llist_del_all(&c->free_by_rcu))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, llist_del_all(&c->waiting_for_gp))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist))
		free_one(c, llnode);
	llist_for_each_safe(llnode, t, llist_del_all(&c->free_llist_extra))
		free_one(c, llnode);
}

void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			drain_mem_cache(c);
		}
		/* kmem_cache and memcg are the same across cpus */
		kmem_cache_destroy(c->kmem_cache);
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		/* c->waiting_for_gp list was drained, but __free_rcu might
		 * still execute. Wait for it now before we free 'c'.
		 */
		rcu_barrier();
		free_percpu(ma->cache);
		ma->cache = NULL;
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				drain_mem_cache(c);
			}
		}
		if (c->objcg)
			obj_cgroup_put(c->objcg);
		rcu_barrier();
		free_percpu(ma->caches);
		ma->caches = NULL;
	}
}

/* notrace is necessary here and in other functions to make sure
 * bpf programs cannot attach to them and cause llist corruptions.
 */
static void notrace *unit_alloc(struct bpf_mem_cache *c)
{
	struct llist_node *llnode = NULL;
	unsigned long flags;
	int cnt = 0;

	/* Disable irqs to prevent the following race for majority of prog types:
	 * prog_A
	 *   bpf_mem_alloc
	 *      preemption or irq -> prog_B
	 *        bpf_mem_alloc
	 *
	 * but prog_B could be a perf_event NMI prog.
	 * Use per-cpu 'active' counter to order free_list access between
	 * unit_alloc/unit_free/bpf_mem_refill.
	 */
	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		llnode = __llist_del_first(&c->free_llist);
		if (llnode)
			cnt = --c->free_cnt;
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	WARN_ON(cnt < 0);

	if (cnt < c->low_watermark)
		irq_work_raise(c);
	return llnode;
}

/* Though 'ptr' object could have been allocated on a different cpu
 * add it to the free_llist of the current cpu.
 * Let kfree() logic deal with it when it's later called from irq_work.
 */
static void notrace unit_free(struct bpf_mem_cache *c, void *ptr)
{
	struct llist_node *llnode = ptr - LLIST_NODE_SZ;
	unsigned long flags;
	int cnt = 0;

	BUILD_BUG_ON(LLIST_NODE_SZ > 8);

	local_irq_save(flags);
	if (local_inc_return(&c->active) == 1) {
		__llist_add(llnode, &c->free_llist);
		cnt = ++c->free_cnt;
	} else {
		/* unit_free() cannot fail. Therefore add an object to atomic
		 * llist. free_bulk() will drain it. Though free_llist_extra is
		 * a per-cpu list we have to use atomic llist_add here, since
		 * it also can be interrupted by bpf nmi prog that does another
		 * unit_free() into the same free_llist_extra.
		 */
		llist_add(llnode, &c->free_llist_extra);
	}
	local_dec(&c->active);
	local_irq_restore(flags);

	if (cnt > c->high_watermark)
		/* free few objects from current cpu into global kmalloc pool */
		irq_work_raise(c);
}
/* Called from BPF program or from sys_bpf syscall.
 * In both cases migration is disabled.
 */
void notrace *bpf_mem_alloc(struct bpf_mem_alloc *ma, size_t size)
{
	int idx;
	void *ret;

	if (!size)
		return ZERO_SIZE_PTR;

	idx = bpf_mem_cache_idx(size + LLIST_NODE_SZ);
	if (idx < 0)
		return NULL;

	ret = unit_alloc(this_cpu_ptr(ma->caches)->cache + idx);
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr)
{
	int idx;

	if (!ptr)
		return;

	idx = bpf_mem_cache_idx(__ksize(ptr - LLIST_NODE_SZ));
	if (idx < 0)
		return;

	unit_free(this_cpu_ptr(ma->caches)->cache + idx, ptr);
}

void notrace *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma)
{
	void *ret;

	ret = unit_alloc(this_cpu_ptr(ma->cache));
	return !ret ? NULL : ret + LLIST_NODE_SZ;
}

void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)
{
	if (!ptr)
		return;

	unit_free(this_cpu_ptr(ma->cache), ptr);
}
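/* A minimal sketch of the pairing rules implied by the four entry points
 * above ('ma' is a hypothetical allocator instance):
 *
 *	// size-agnostic mode: bpf_mem_alloc_init(&ma, 0)
 *	p = bpf_mem_alloc(&ma, 64);	// 64 + 8 -> served from the 96 byte bucket
 *	bpf_mem_free(&ma, p);		// bucket re-derived via __ksize()
 *
 *	// fixed-size mode: bpf_mem_alloc_init(&ma, sizeof(elem))
 *	p = bpf_mem_cache_alloc(&ma);
 *	bpf_mem_cache_free(&ma, p);
 *
 * Objects allocated with one pair must be freed with the same pair, since
 * bpf_mem_alloc()/bpf_mem_free() index ma->caches while the _cache_ variants
 * use ma->cache.
 */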