Lines matching refs: pool

 54  struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;   in mlxsw_sp_counter_sub_pools_init()   local
 62  for (i = 0; i < pool->sub_pools_count; i++) {   in mlxsw_sp_counter_sub_pools_init()
 63          sub_pool = &pool->sub_pools[i];   in mlxsw_sp_counter_sub_pools_init()
 89          sub_pool = &pool->sub_pools[i];   in mlxsw_sp_counter_sub_pools_init()
 99  struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;   in mlxsw_sp_counter_sub_pools_fini()   local
104  for (i = 0; i < pool->sub_pools_count; i++) {   in mlxsw_sp_counter_sub_pools_fini()
105          sub_pool = &pool->sub_pools[i];   in mlxsw_sp_counter_sub_pools_fini()
115  const struct mlxsw_sp_counter_pool *pool = priv;   in mlxsw_sp_counter_pool_occ_get()   local
117  return atomic_read(&pool->active_entries_count);   in mlxsw_sp_counter_pool_occ_get()
124  struct mlxsw_sp_counter_pool *pool;   in mlxsw_sp_counter_pool_init()   local
127  pool = kzalloc(struct_size(pool, sub_pools, sub_pools_count),   in mlxsw_sp_counter_pool_init()
129  if (!pool)   in mlxsw_sp_counter_pool_init()
131  mlxsw_sp->counter_pool = pool;   in mlxsw_sp_counter_pool_init()
132  pool->sub_pools_count = sub_pools_count;   in mlxsw_sp_counter_pool_init()
133  memcpy(pool->sub_pools, mlxsw_sp_counter_sub_pools,   in mlxsw_sp_counter_pool_init()
134         flex_array_size(pool, sub_pools, pool->sub_pools_count));   in mlxsw_sp_counter_pool_init()
135  spin_lock_init(&pool->counter_pool_lock);   in mlxsw_sp_counter_pool_init()
136  atomic_set(&pool->active_entries_count, 0);   in mlxsw_sp_counter_pool_init()
139         &pool->pool_size);   in mlxsw_sp_counter_pool_init()
143         mlxsw_sp_counter_pool_occ_get, pool);   in mlxsw_sp_counter_pool_init()
145  pool->usage = bitmap_zalloc(pool->pool_size, GFP_KERNEL);   in mlxsw_sp_counter_pool_init()
146  if (!pool->usage) {   in mlxsw_sp_counter_pool_init()
158  bitmap_free(pool->usage);   in mlxsw_sp_counter_pool_init()
163  kfree(pool);   in mlxsw_sp_counter_pool_init()
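Read together, the mlxsw_sp_counter_pool_init() matches show the pool layout this file works with: one structure carrying a flexible array of sub-pools (allocated with struct_size() and filled from a template via memcpy()/flex_array_size()), a usage bitmap of pool_size bits, a spinlock, and an atomic count of active entries. Below is a minimal user-space sketch of that layout and its setup, assuming simplified types; the kernel's kzalloc()/bitmap_zalloc(), the spinlock, and the devlink resource/occupancy registration seen around lines 139 and 143 are deliberately left out of the model.

/* Hypothetical user-space model of the counter pool layout; it only mirrors
 * the fields referenced in the matches above and is not the driver code. */
#include <stdatomic.h>
#include <stdlib.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

struct counter_sub_pool {
	unsigned int base_index;   /* first bitmap entry owned by this sub-pool */
	unsigned int size;         /* number of entries in the sub-pool */
	unsigned int entry_size;   /* bitmap bits one counter consumes */
};

struct counter_pool {
	unsigned int pool_size;                /* bitmap length in bits */
	unsigned long *usage;                  /* set bit = entry in use */
	atomic_uint active_entries_count;      /* occupancy, as read by occ_get */
	unsigned int sub_pools_count;
	struct counter_sub_pool sub_pools[];   /* flexible array, cf. struct_size() */
};

static struct counter_pool *
counter_pool_init(const struct counter_sub_pool *tmpl, unsigned int count,
		  unsigned int pool_size)
{
	size_t longs = (pool_size + BITS_PER_LONG - 1) / BITS_PER_LONG;
	struct counter_pool *pool;

	pool = calloc(1, sizeof(*pool) + count * sizeof(*tmpl));
	if (!pool)
		return NULL;
	pool->sub_pools_count = count;
	memcpy(pool->sub_pools, tmpl, count * sizeof(*tmpl));
	pool->pool_size = pool_size;
	atomic_init(&pool->active_entries_count, 0);
	pool->usage = calloc(longs, sizeof(unsigned long));   /* bitmap_zalloc() stand-in */
	if (!pool->usage) {
		free(pool);
		return NULL;
	}
	return pool;
}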
169  struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;   in mlxsw_sp_counter_pool_fini()   local
173  WARN_ON(find_first_bit(pool->usage, pool->pool_size) !=   in mlxsw_sp_counter_pool_fini()
174          pool->pool_size);   in mlxsw_sp_counter_pool_fini()
175  WARN_ON(atomic_read(&pool->active_entries_count));   in mlxsw_sp_counter_pool_fini()
176  bitmap_free(pool->usage);   in mlxsw_sp_counter_pool_fini()
179  kfree(pool);   in mlxsw_sp_counter_pool_fini()
186  struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;   in mlxsw_sp_counter_alloc()   local
192  sub_pool = &pool->sub_pools[sub_pool_id];   in mlxsw_sp_counter_alloc()
196  spin_lock(&pool->counter_pool_lock);   in mlxsw_sp_counter_alloc()
197  entry_index = find_next_zero_bit(pool->usage, stop_index, entry_index);   in mlxsw_sp_counter_alloc()
210          __set_bit(entry_index + i, pool->usage);   in mlxsw_sp_counter_alloc()
211  spin_unlock(&pool->counter_pool_lock);   in mlxsw_sp_counter_alloc()
215  atomic_add(sub_pool->entry_size, &pool->active_entries_count);   in mlxsw_sp_counter_alloc()
219  spin_unlock(&pool->counter_pool_lock);   in mlxsw_sp_counter_alloc()
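The mlxsw_sp_counter_alloc() matches outline the allocation path: under the pool lock, find_next_zero_bit() locates a free entry before stop_index (line 197), entry_size consecutive bits are set (line 210), and active_entries_count is increased by entry_size (line 215). Here is a hedged continuation of the user-space sketch above; the bit helpers and the linear scan stand in for the kernel's bitmap API, locking is omitted, the sub-pool's base_index as the scan start is an assumption not shown in the matched lines, and a plain -1 replaces the driver's error codes.

/* Continues the counter_pool sketch above; not the driver's implementation. */
static int test_bit_(const unsigned long *map, unsigned int bit)
{
	return (map[bit / BITS_PER_LONG] >> (bit % BITS_PER_LONG)) & 1;
}

static void set_bit_(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static int counter_alloc(struct counter_pool *pool, unsigned int sub_pool_id,
			 unsigned int *p_counter_index)
{
	struct counter_sub_pool *sub_pool = &pool->sub_pools[sub_pool_id];
	unsigned int stop_index = sub_pool->base_index + sub_pool->size;
	unsigned int entry_index = sub_pool->base_index;
	unsigned int i;

	/* find_next_zero_bit() stand-in: first free entry in this sub-pool */
	while (entry_index < stop_index && test_bit_(pool->usage, entry_index))
		entry_index++;
	/* a counter may span several bits; make sure the whole run fits */
	if (entry_index + sub_pool->entry_size > stop_index)
		return -1;
	for (i = 0; i < sub_pool->entry_size; i++)
		set_bit_(pool->usage, entry_index + i);
	atomic_fetch_add(&pool->active_entries_count, sub_pool->entry_size);
	*p_counter_index = entry_index;
	return 0;
}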
227  struct mlxsw_sp_counter_pool *pool = mlxsw_sp->counter_pool;   in mlxsw_sp_counter_free()   local
231  if (WARN_ON(counter_index >= pool->pool_size))   in mlxsw_sp_counter_free()
233  sub_pool = &pool->sub_pools[sub_pool_id];   in mlxsw_sp_counter_free()
234  spin_lock(&pool->counter_pool_lock);   in mlxsw_sp_counter_free()
236          __clear_bit(counter_index + i, pool->usage);   in mlxsw_sp_counter_free()
237  spin_unlock(&pool->counter_pool_lock);   in mlxsw_sp_counter_free()
239  atomic_sub(sub_pool->entry_size, &pool->active_entries_count);   in mlxsw_sp_counter_free()
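The mlxsw_sp_counter_free() matches mirror allocation: the index is sanity-checked against pool_size (WARN_ON on line 231), the entry's entry_size bits are cleared under the lock (line 236), and active_entries_count is reduced by entry_size (line 239). A matching continuation of the sketch, again without the spinlock:

/* Continues the counter_pool sketch above; not the driver's implementation. */
static void clear_bit_(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] &= ~(1UL << (bit % BITS_PER_LONG));
}

static void counter_free(struct counter_pool *pool, unsigned int sub_pool_id,
			 unsigned int counter_index)
{
	struct counter_sub_pool *sub_pool;
	unsigned int i;

	if (counter_index >= pool->pool_size)   /* WARN_ON() in the matched code */
		return;
	sub_pool = &pool->sub_pools[sub_pool_id];
	for (i = 0; i < sub_pool->entry_size; i++)
		clear_bit_(pool->usage, counter_index + i);
	atomic_fetch_sub(&pool->active_entries_count, sub_pool->entry_size);
}

Pairing counter_alloc() and counter_free() this way keeps the usage bitmap and active_entries_count consistent, which is what the WARN_ON() checks in mlxsw_sp_counter_pool_fini() (lines 173-175 above) verify at teardown: no bit left set in the bitmap and no active entries left counted.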