Lines Matching refs:dtab
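
The matches below come from the kernel's BPF device-map code (the dev_map_* and __dev_map_* functions of kernel/bpf/devmap.c). Each entry shows the source line number, the matching line, and the enclosing function; "argument" and "local" mark the declaration sites of dtab. Interleaved after each group is a short, hedged userspace sketch of the technique that group illustrates; these sketches are illustrative analogues, not the kernel implementation.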

104 static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab,  in dev_map_index_hash()  argument
107 return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; in dev_map_index_hash()
110 static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) in dev_map_init_map() argument
130 bpf_map_init_from_attr(&dtab->map, attr); in dev_map_init_map()
136 if (dtab->map.max_entries > 1UL << 31) in dev_map_init_map()
139 dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); in dev_map_init_map()
141 dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, in dev_map_init_map()
142 dtab->map.numa_node); in dev_map_init_map()
143 if (!dtab->dev_index_head) in dev_map_init_map()
146 spin_lock_init(&dtab->index_lock); in dev_map_init_map()
148 dtab->netdev_map = bpf_map_area_alloc((u64) dtab->map.max_entries * in dev_map_init_map()
150 dtab->map.numa_node); in dev_map_init_map()
151 if (!dtab->netdev_map) in dev_map_init_map()
160 struct bpf_dtab *dtab; in dev_map_alloc() local
163 dtab = bpf_map_area_alloc(sizeof(*dtab), NUMA_NO_NODE); in dev_map_alloc()
164 if (!dtab) in dev_map_alloc()
167 err = dev_map_init_map(dtab, attr); in dev_map_alloc()
169 bpf_map_area_free(dtab); in dev_map_alloc()
174 list_add_tail_rcu(&dtab->list, &dev_map_list); in dev_map_alloc()
177 return &dtab->map; in dev_map_alloc()
182 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_free() local
196 list_del_rcu(&dtab->list); in dev_map_free()
205 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { in dev_map_free()
206 for (i = 0; i < dtab->n_buckets; i++) { in dev_map_free()
211 head = dev_map_index_hash(dtab, i); in dev_map_free()
222 bpf_map_area_free(dtab->dev_index_head); in dev_map_free()
224 for (i = 0; i < dtab->map.max_entries; i++) { in dev_map_free()
227 dev = rcu_dereference_raw(dtab->netdev_map[i]); in dev_map_free()
237 bpf_map_area_free(dtab->netdev_map); in dev_map_free()
240 bpf_map_area_free(dtab); in dev_map_free()
245 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_get_next_key() local
249 if (index >= dtab->map.max_entries) { in dev_map_get_next_key()
254 if (index == dtab->map.max_entries - 1) in dev_map_get_next_key()
266 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in __dev_map_hash_lookup_elem() local
267 struct hlist_head *head = dev_map_index_hash(dtab, key); in __dev_map_hash_lookup_elem()
271 lockdep_is_held(&dtab->index_lock)) in __dev_map_hash_lookup_elem()
281 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_hash_get_next_key() local
304 i = idx & (dtab->n_buckets - 1); in dev_map_hash_get_next_key()
308 for (; i < dtab->n_buckets; i++) { in dev_map_hash_get_next_key()
309 head = dev_map_index_hash(dtab, i); in dev_map_hash_get_next_key()
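
Lines 304-309 show how the hash variant iterates statelessly: compute the current key's bucket with the same mask, then fall through to later buckets until a populated one is found. A simplified sketch (the real code first checks for a successor within the current key's chain; this version only shows the bucket fall-through):

    #include <stdint.h>
    #include <stdio.h>

    struct node { uint32_t idx; struct node *next; };

    /* Return the first entry of the next non-empty bucket at or
     * after position i, as the loop on line 308 does. */
    static struct node *first_from_bucket(struct node **buckets,
                                          uint32_t n_buckets, uint32_t i)
    {
        for (; i < n_buckets; i++)
            if (buckets[i])
                return buckets[i];
        return NULL;
    }

    int main(void)
    {
        struct node a = { 5, NULL }, b = { 10, NULL };
        struct node *buckets[4] = { NULL, &a, NULL, &b };

        /* Key 5 hashes to bucket 5 & 3 == 1; iteration after it
         * resumes the scan at bucket 2. */
        struct node *n = first_from_bucket(buckets, 4, (5 & 3) + 1);
        if (n)
            printf("next key: %u\n", n->idx);
        return 0;
    }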
428 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in __dev_map_lookup_elem() local
434 obj = rcu_dereference_check(dtab->netdev_map[key], in __dev_map_lookup_elem()
595 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_enqueue_multi() local
610 dst = rcu_dereference_check(dtab->netdev_map[i], in dev_map_enqueue_multi()
631 for (i = 0; i < dtab->n_buckets; i++) { in dev_map_enqueue_multi()
632 head = dev_map_index_hash(dtab, i); in dev_map_enqueue_multi()
634 lockdep_is_held(&dtab->index_lock)) { in dev_map_enqueue_multi()
712 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_redirect_multi() local
728 dst = rcu_dereference_check(dtab->netdev_map[i], in dev_map_redirect_multi()
750 for (i = 0; i < dtab->n_buckets; i++) { in dev_map_redirect_multi()
751 head = dev_map_index_hash(dtab, i); in dev_map_redirect_multi()
811 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_delete_elem() local
818 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[k], NULL)); in dev_map_delete_elem()
821 atomic_dec((atomic_t *)&dtab->items); in dev_map_delete_elem()
828 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_hash_delete_elem() local
834 spin_lock_irqsave(&dtab->index_lock, flags); in dev_map_hash_delete_elem()
838 dtab->items--; in dev_map_hash_delete_elem()
843 spin_unlock_irqrestore(&dtab->index_lock, flags); in dev_map_hash_delete_elem()
849 struct bpf_dtab *dtab, in __dev_map_alloc_node() argument
856 dev = bpf_map_kmalloc_node(&dtab->map, sizeof(*dev), in __dev_map_alloc_node()
858 dtab->map.numa_node); in __dev_map_alloc_node()
872 !bpf_prog_map_compatible(&dtab->map, prog)) in __dev_map_alloc_node()
899 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in __dev_map_update_elem() local
906 if (unlikely(i >= dtab->map.max_entries)) in __dev_map_update_elem()
920 dev = __dev_map_alloc_node(net, dtab, &val, i); in __dev_map_update_elem()
929 old_dev = unrcu_pointer(xchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev))); in __dev_map_update_elem()
933 atomic_inc((atomic_t *)&dtab->items); in __dev_map_update_elem()
948 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in __dev_map_hash_update_elem() local
961 spin_lock_irqsave(&dtab->index_lock, flags); in __dev_map_hash_update_elem()
967 dev = __dev_map_alloc_node(net, dtab, &val, idx); in __dev_map_hash_update_elem()
976 if (dtab->items >= dtab->map.max_entries) { in __dev_map_hash_update_elem()
977 spin_unlock_irqrestore(&dtab->index_lock, flags); in __dev_map_hash_update_elem()
981 dtab->items++; in __dev_map_hash_update_elem()
985 dev_map_index_hash(dtab, idx)); in __dev_map_hash_update_elem()
986 spin_unlock_irqrestore(&dtab->index_lock, flags); in __dev_map_hash_update_elem()
994 spin_unlock_irqrestore(&dtab->index_lock, flags); in __dev_map_hash_update_elem()
1021 struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map); in dev_map_mem_usage() local
1025 usage += (u64)dtab->n_buckets * sizeof(struct hlist_head); in dev_map_mem_usage()
1028 usage += atomic_read((atomic_t *)&dtab->items) * in dev_map_mem_usage()
1062 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab, in dev_map_hash_remove_netdev() argument
1068 spin_lock_irqsave(&dtab->index_lock, flags); in dev_map_hash_remove_netdev()
1069 for (i = 0; i < dtab->n_buckets; i++) { in dev_map_hash_remove_netdev()
1074 head = dev_map_index_hash(dtab, i); in dev_map_hash_remove_netdev()
1080 dtab->items--; in dev_map_hash_remove_netdev()
1085 spin_unlock_irqrestore(&dtab->index_lock, flags); in dev_map_hash_remove_netdev()
1092 struct bpf_dtab *dtab; in dev_map_notification() local
1115 list_for_each_entry_rcu(dtab, &dev_map_list, list) { in dev_map_notification()
1116 if (dtab->map.map_type == BPF_MAP_TYPE_DEVMAP_HASH) { in dev_map_notification()
1117 dev_map_hash_remove_netdev(dtab, netdev); in dev_map_notification()
1121 for (i = 0; i < dtab->map.max_entries; i++) { in dev_map_notification()
1124 dev = rcu_dereference(dtab->netdev_map[i]); in dev_map_notification()
1127 odev = unrcu_pointer(cmpxchg(&dtab->netdev_map[i], RCU_INITIALIZER(dev), NULL)); in dev_map_notification()
1131 atomic_dec((atomic_t *)&dtab->items); in dev_map_notification()