// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->snode);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map_lockless(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed_lockless(&selem->map_node);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}
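/* Allocate a zero-initialized selem for @owner, optionally charging
 * smap->elem_size against the owner's memory and copying @value into
 * the element's data.  Returns NULL on charge or allocation failure.
 */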
struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem, gfp_t gfp_flags)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				gfp_flags | __GFP_NOWARN);
	if (selem) {
		if (value)
			copy_map_value(&smap->map, SDATA(selem)->data, value);
		/* No need to call check_and_init_map_value as memory is zero init */
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}

static void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree(local_storage);
}

static void bpf_local_storage_free_trace_rcu(struct rcu_head *rcu)
{
	/* If RCU Tasks Trace grace period implies RCU grace period, do
	 * kfree(), else do kfree_rcu().
	 */
	if (rcu_trace_implies_rcu_gp())
		bpf_local_storage_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_local_storage_free_rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree(selem);
}

static void bpf_selem_free_trace_rcu(struct rcu_head *rcu)
{
	if (rcu_trace_implies_rcu_gp())
		bpf_selem_free_rcu(rcu);
	else
		call_rcu(rcu, bpf_selem_free_rcu);
}
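/* Free a selem after its special fields (e.g. kptrs) are released.
 * Unless reuse_now is true, sleepable bpf programs may still hold a
 * reference, so wait for an RCU Tasks Trace grace period (which also
 * covers or chains a regular RCU grace period) before freeing.
 */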
void bpf_selem_free(struct bpf_local_storage_elem *selem,
		    struct bpf_local_storage_map *smap,
		    bool reuse_now)
{
	bpf_obj_free_fields(smap->map.record, SDATA(selem)->data);
	if (!reuse_now)
		call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_trace_rcu);
	else
		call_rcu(&selem->rcu, bpf_selem_free_rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
					    struct bpf_local_storage_elem *selem,
					    bool uncharge_mem, bool reuse_now)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now.  local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock)
		 * will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	bpf_selem_free(selem, smap, reuse_now);

	if (rcu_access_pointer(local_storage->smap) == smap)
		RCU_INIT_POINTER(local_storage->smap, NULL);

	return free_local_storage;
}
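/* Unlink selem from its local_storage under local_storage->lock.
 * If it was the last element, the storage itself is scheduled to be
 * freed after the required RCU grace period(s).
 */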
static void bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
				     bool reuse_now)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage_lockless(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true, reuse_now);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage) {
		if (!reuse_now)
			call_rcu_tasks_trace(&local_storage->rcu,
					     bpf_local_storage_free_trace_rcu);
		else
			call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
	}
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

static void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map_lockless(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}
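/* Hash selem into one of smap's buckets and add it to that bucket's
 * list under the bucket lock, so the map side (e.g. map_free) can
 * find and unlink the element later.
 */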
void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool reuse_now)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	bpf_selem_unlink_storage(selem, reuse_now);
}

/* If cacheit_lockit is false, this lookup function is lockless */
struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* spinlock is needed to avoid racing with the
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
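/* Sanity-check the update flags against the current state:
 * BPF_NOEXIST requires that no element exists yet, while BPF_EXIST
 * requires that one already does.
 */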
static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
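/* Allocate the owner's bpf_local_storage, link first_selem into it,
 * and publish it to the owner with cmpxchg().  Returns -EAGAIN if
 * another CPU has published a storage for this owner in the meantime.
 */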
int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem,
			    gfp_t gfp_flags)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  gfp_flags | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	RCU_INIT_POINTER(storage->smap, smap);
	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* sk cannot be going away because it is linking new elem
 * to sk->sk_bpf_storage. (i.e. sk->sk_refcnt cannot be 0).
 * Otherwise, it will become a leak (and other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags, gfp_t gfp_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem = NULL;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot be both set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(smap->map.record, BPF_SPIN_LOCK)))
		return ERR_PTR(-EINVAL);

	if (gfp_flags == GFP_KERNEL && (map_flags & ~BPF_F_LOCK) != BPF_NOEXIST)
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
		if (err) {
			bpf_selem_free(selem, smap, true);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage_lockless(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

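	/* A GFP_KERNEL allocation may sleep, so when the caller allows
	 * sleeping the selem is allocated up front, before taking the
	 * non-sleepable raw spinlock below.
	 */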
	if (gfp_flags == GFP_KERNEL) {
		selem = bpf_selem_alloc(smap, owner, value, true, gfp_flags);
		if (!selem)
			return ERR_PTR(-ENOMEM);
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel del is happening and local_storage is going
		 * away. It has just been checked before, so very
		 * unlikely. Return instead of retry to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	if (gfp_flags != GFP_KERNEL) {
		/* local_storage->lock is held.  Hence, we are sure
		 * we can unlink and uncharge the old_sdata successfully
		 * later.  Hence, instead of charging the new selem now
		 * and then uncharge the old selem later (which may cause
		 * a potential but unnecessary charge failure), avoid taking
		 * a charge at all here (the "!old_sdata" check) and the
		 * old_sdata will not be uncharged later during
		 * bpf_selem_unlink_storage_nolock().
		 */
		selem = bpf_selem_alloc(smap, owner, value, !old_sdata, gfp_flags);
		if (!selem) {
			err = -ENOMEM;
			goto unlock_err;
		}
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false, false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	if (selem) {
		mem_uncharge(smap, owner, smap->elem_size);
		bpf_selem_free(selem, smap, true);
	}
	return ERR_PTR(err);
}
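/* Pick the cache slot with the lowest usage count so that different
 * maps attached to the same owner are unlikely to evict each other
 * from the owner's lookup cache.
 */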
static u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}

static void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
					     u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
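/* Called when the owner (e.g. a socket or a task) is being destroyed:
 * unlink and free every selem still linked to the owner's storage and,
 * on the last one, free the local_storage itself.
 */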
void bpf_local_storage_destroy(struct bpf_local_storage *local_storage)
{
	struct bpf_local_storage_elem *selem;
	bool free_storage = false;
	struct hlist_node *n;
	unsigned long flags;

	/* Neither the bpf_prog nor the bpf_map's syscall
	 * could be modifying the local_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * local_storage->list by the bpf_prog or by the bpf_map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the local_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	hlist_for_each_entry_safe(selem, n, &local_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * local_storage.
		 */
		bpf_selem_unlink_map(selem);
		/* If the local_storage list has only one element, the
		 * bpf_selem_unlink_storage_nolock() will return true.
		 * Otherwise, it will return false. This loop intends
		 * to remove all local storage. So the last iteration
		 * of the loop will set free_storage to true.
		 */
		free_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, false, true);
	}
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_storage)
		call_rcu(&local_storage->rcu, bpf_local_storage_free_rcu);
}

u64 bpf_local_storage_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_local_storage_map *smap = (struct bpf_local_storage_map *)map;
	u64 usage = sizeof(*smap);

	/* The dynamically allocated selems are not counted currently. */
	usage += sizeof(*smap->buckets) * (1ULL << smap->bucket_log);
	return usage;
}
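/* Common allocation for all local storage map flavors: size the bucket
 * array by the number of possible CPUs and reserve a slot in the
 * owner-side lookup cache.
 */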
struct bpf_map *
bpf_local_storage_map_alloc(union bpf_attr *attr,
			    struct bpf_local_storage_cache *cache)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = bpf_map_area_alloc(sizeof(*smap), NUMA_NO_NODE);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = bpf_map_kvcalloc(&smap->map, sizeof(*smap->buckets),
					 nbuckets, GFP_USER | __GFP_NOWARN);
	if (!smap->buckets) {
		bpf_map_area_free(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size = offsetof(struct bpf_local_storage_elem,
				   sdata.data[attr->value_size]);

	smap->cache_idx = bpf_local_storage_cache_idx_get(cache);
	return &smap->map;
}

void bpf_local_storage_map_free(struct bpf_map *map,
				struct bpf_local_storage_cache *cache,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_map_bucket *b;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	unsigned int i;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(cache, smap->cache_idx);

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem, true);
			if (busy_counter) {
				this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	bpf_map_area_free(smap);
}