Lines Matching refs:pool
(Cross-reference hits for the identifier "pool". The leading number on each line is the source line of the match, and the trailing "in ...()" names the enclosing function, with "argument"/"local" marking how the identifier is declared there. The functions all appear to come from the mlx5 SW-steering ICM pool code, dr_icm_pool.c.)

82 u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_mr_addr()
94 u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_icm_addr()
102 chunk->buddy_mem->pool->icm_type); in mlx5dr_icm_pool_get_chunk_byte_size()
111 dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool) in dr_icm_pool_mr_create() argument
113 struct mlx5_core_dev *mdev = pool->dmn->mdev; in dr_icm_pool_mr_create()
123 icm_mr->dmn = pool->dmn; in dr_icm_pool_mr_create()
125 icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in dr_icm_pool_mr_create()
126 pool->icm_type); in dr_icm_pool_mr_create()
128 switch (pool->icm_type) { in dr_icm_pool_mr_create()
144 WARN_ON(pool->icm_type); in dr_icm_pool_mr_create()
153 mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err); in dr_icm_pool_mr_create()
158 err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn, in dr_icm_pool_mr_create()
164 mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err); in dr_icm_pool_mr_create()
171 mlx5dr_err(pool->dmn, "Failed to get Aligned ICM mem (asked: %zu)\n", in dr_icm_pool_mr_create()
228 mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz); in dr_icm_buddy_init_ste_cache()
263 static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool) in dr_icm_buddy_create() argument
268 icm_mr = dr_icm_pool_mr_create(pool); in dr_icm_buddy_create()
276 if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz)) in dr_icm_buddy_create()
280 buddy->pool = pool; in dr_icm_buddy_create()
282 if (pool->icm_type == DR_ICM_TYPE_STE) { in dr_icm_buddy_create()
289 list_add(&buddy->list_node, &pool->buddy_mem_list); in dr_icm_buddy_create()
291 pool->dmn->num_buddies[pool->icm_type]++; in dr_icm_buddy_create()
306 enum mlx5dr_icm_type icm_type = buddy->pool->icm_type; in dr_icm_buddy_destroy()
315 buddy->pool->dmn->num_buddies[icm_type]--; in dr_icm_buddy_destroy()
322 struct mlx5dr_icm_pool *pool, in dr_icm_chunk_init() argument
333 if (pool->icm_type == DR_ICM_TYPE_STE) { in dr_icm_chunk_init()
334 offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg; in dr_icm_chunk_init()
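The accessor matches at the top (mlx5dr_icm_pool_get_chunk_mr_addr(), ..._icm_addr(), ..._byte_size()) and dr_icm_chunk_init() all reduce to the same arithmetic: look up the per-entry size for the pool's icm_type and scale it by the chunk's segment index or entry count. Below is a minimal standalone sketch of that computation; the type names and entry sizes are placeholders, not the driver's constants.

/* Minimal model of the offset/size math; placeholder types and sizes only. */
#include <stdint.h>
#include <stdio.h>

enum toy_icm_type { TOY_ICM_STE, TOY_ICM_MODIFY_ACTION };

/* per-entry size in bytes for each ICM type (made-up values) */
static uint32_t toy_entry_size(enum toy_icm_type t)
{
	return t == TOY_ICM_STE ? 64 : 8;
}

/* like dr_icm_chunk_init(): byte offset of a chunk = entry_size * seg */
static uint64_t toy_chunk_offset(enum toy_icm_type t, unsigned int seg)
{
	return (uint64_t)toy_entry_size(t) * seg;
}

/* chunk byte size = number of entries * per-entry size (the driver derives
 * the entry count from the chunk's log2 size) */
static uint64_t toy_chunk_byte_size(enum toy_icm_type t, unsigned int num_entries)
{
	return (uint64_t)toy_entry_size(t) * num_entries;
}

int main(void)
{
	printf("offset of seg 10: %llu bytes\n",
	       (unsigned long long)toy_chunk_offset(TOY_ICM_STE, 10));
	printf("size of 256-entry chunk: %llu bytes\n",
	       (unsigned long long)toy_chunk_byte_size(TOY_ICM_STE, 256));
	return 0;
}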
341 static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool) in dr_icm_pool_is_sync_required() argument
343 return pool->hot_memory_size > pool->th; in dr_icm_pool_is_sync_required()
346 static void dr_icm_pool_clear_hot_chunks_arr(struct mlx5dr_icm_pool *pool) in dr_icm_pool_clear_hot_chunks_arr() argument
351 for (i = 0; i < pool->hot_chunks_num; i++) { in dr_icm_pool_clear_hot_chunks_arr()
352 hot_chunk = &pool->hot_chunks_arr[i]; in dr_icm_pool_clear_hot_chunks_arr()
358 pool->icm_type); in dr_icm_pool_clear_hot_chunks_arr()
361 pool->hot_chunks_num = 0; in dr_icm_pool_clear_hot_chunks_arr()
362 pool->hot_memory_size = 0; in dr_icm_pool_clear_hot_chunks_arr()
365 static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool) in dr_icm_pool_sync_all_buddy_pools() argument
370 err = mlx5dr_cmd_sync_steering(pool->dmn->mdev); in dr_icm_pool_sync_all_buddy_pools()
372 mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err); in dr_icm_pool_sync_all_buddy_pools()
376 dr_icm_pool_clear_hot_chunks_arr(pool); in dr_icm_pool_sync_all_buddy_pools()
378 list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) { in dr_icm_pool_sync_all_buddy_pools()
379 if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE) in dr_icm_pool_sync_all_buddy_pools()
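dr_icm_pool_is_sync_required(), dr_icm_pool_clear_hot_chunks_arr() and dr_icm_pool_sync_all_buddy_pools() together implement deferred reclaim: freed chunks are parked in a "hot" array and counted against pool->th; once hot_memory_size crosses that threshold, the hardware is synced, the hot array is drained back into the buddies, and unused STE buddies can be released. The following self-contained toy model shows that accounting; all names and the fake sync step are stand-ins, not the driver's API.

/* Toy model of the deferred-free accounting above; invented names only. */
#include <stddef.h>
#include <stdio.h>

#define HOT_CAPACITY 8

struct toy_pool {
	size_t hot_sizes[HOT_CAPACITY]; /* analogue of pool->hot_chunks_arr */
	size_t hot_num;                 /* analogue of pool->hot_chunks_num */
	size_t hot_memory_size;         /* analogue of pool->hot_memory_size */
	size_t th;                      /* analogue of pool->th */
};

/* stand-in for mlx5dr_cmd_sync_steering(): tell HW to stop using freed ICM */
static void toy_sync_hw(void)
{
	puts("sync HW");
}

/* analogue of dr_icm_pool_clear_hot_chunks_arr(); in the driver this also
 * returns every parked segment to its buddy allocator */
static void toy_clear_hot(struct toy_pool *p)
{
	p->hot_num = 0;
	p->hot_memory_size = 0;
}

/* analogue of the accounting done in mlx5dr_icm_free_chunk() */
static void toy_free_chunk(struct toy_pool *p, size_t bytes)
{
	p->hot_sizes[p->hot_num++] = bytes;
	p->hot_memory_size += bytes;

	if (p->hot_memory_size > p->th) { /* dr_icm_pool_is_sync_required() */
		toy_sync_hw();
		toy_clear_hot(p);
	}
}

int main(void)
{
	struct toy_pool p = { .th = 4096 };

	toy_free_chunk(&p, 1024);
	toy_free_chunk(&p, 4096); /* crosses the threshold: sync + drain */
	printf("hot after sync: %zu bytes\n", p.hot_memory_size);
	return 0;
}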
386 static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool, in dr_icm_handle_buddies_get_mem() argument
397 list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) { in dr_icm_handle_buddies_get_mem()
405 mlx5dr_err(pool->dmn, in dr_icm_handle_buddies_get_mem()
413 err = dr_icm_buddy_create(pool); in dr_icm_handle_buddies_get_mem()
415 mlx5dr_err(pool->dmn, in dr_icm_handle_buddies_get_mem()
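dr_icm_handle_buddies_get_mem() walks pool->buddy_mem_list looking for a buddy that can satisfy the requested chunk order and only calls dr_icm_buddy_create() to grow the pool when no existing buddy has room; both error paths are visible in the matches above. The sketch below is a simplified, self-contained model of that scan-then-grow pattern in which a plain free-entry counter replaces the real buddy allocator; every name is illustrative.

/* Simplified scan-then-grow sketch; not the driver's buddy allocator. */
#include <stdio.h>
#include <stdlib.h>

struct toy_buddy {
	unsigned int free_entries;
	struct toy_buddy *next;
};

struct toy_pool {
	struct toy_buddy *buddies;  /* analogue of pool->buddy_mem_list */
	unsigned int buddy_entries; /* capacity of one newly created buddy */
};

/* analogue of dr_icm_buddy_create(): grow the pool by one buddy */
static struct toy_buddy *toy_buddy_create(struct toy_pool *p)
{
	struct toy_buddy *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->free_entries = p->buddy_entries;
	b->next = p->buddies;
	p->buddies = b; /* like list_add(&buddy->list_node, &pool->buddy_mem_list) */
	return b;
}

/* analogue of dr_icm_handle_buddies_get_mem(): reuse before growing */
static struct toy_buddy *toy_get_mem(struct toy_pool *p, unsigned int entries)
{
	struct toy_buddy *b;

	for (b = p->buddies; b; b = b->next)
		if (b->free_entries >= entries)
			goto found;

	b = toy_buddy_create(p);
	if (!b || b->free_entries < entries)
		return NULL; /* the "Failed to create..." error path */
found:
	b->free_entries -= entries;
	return b;
}

int main(void)
{
	struct toy_pool p = { .buddy_entries = 1024 };

	toy_get_mem(&p, 1000);
	toy_get_mem(&p, 100); /* first buddy is too full, so a second one is created */
	printf("buddies now: %d\n", p.buddies && p.buddies->next ? 2 : 1);

	while (p.buddies) {
		struct toy_buddy *b = p.buddies;

		p.buddies = b->next;
		free(b);
	}
	return 0;
}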
435 mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool, in mlx5dr_icm_alloc_chunk() argument
443 if (chunk_size > pool->max_log_chunk_sz) in mlx5dr_icm_alloc_chunk()
446 mutex_lock(&pool->mutex); in mlx5dr_icm_alloc_chunk()
448 ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg); in mlx5dr_icm_alloc_chunk()
452 chunk = kmem_cache_alloc(pool->chunks_kmem_cache, GFP_KERNEL); in mlx5dr_icm_alloc_chunk()
456 dr_icm_chunk_init(chunk, pool, chunk_size, buddy, seg); in mlx5dr_icm_alloc_chunk()
463 mutex_unlock(&pool->mutex); in mlx5dr_icm_alloc_chunk()
470 struct mlx5dr_icm_pool *pool = buddy->pool; in mlx5dr_icm_free_chunk() local
474 chunks_cache = pool->chunks_kmem_cache; in mlx5dr_icm_free_chunk()
477 mutex_lock(&pool->mutex); in mlx5dr_icm_free_chunk()
479 pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk); in mlx5dr_icm_free_chunk()
481 hot_chunk = &pool->hot_chunks_arr[pool->hot_chunks_num++]; in mlx5dr_icm_free_chunk()
489 if (dr_icm_pool_is_sync_required(pool)) in mlx5dr_icm_free_chunk()
490 dr_icm_pool_sync_all_buddy_pools(pool); in mlx5dr_icm_free_chunk()
492 mutex_unlock(&pool->mutex); in mlx5dr_icm_free_chunk()
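Worth noting from the free path: mlx5dr_icm_free_chunk() never hands the segment straight back to its buddy. Under pool->mutex it only adds the chunk's byte size to pool->hot_memory_size and records it in the next hot_chunks_arr slot; actual reclamation happens in dr_icm_pool_sync_all_buddy_pools() once the threshold check fires, so one (presumably expensive) mlx5dr_cmd_sync_steering() call is amortized over many freed chunks. The toy model after the sync helpers above sketches exactly this accounting.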
495 struct mlx5dr_ste_htbl *mlx5dr_icm_pool_alloc_htbl(struct mlx5dr_icm_pool *pool) in mlx5dr_icm_pool_alloc_htbl() argument
497 return kmem_cache_alloc(pool->dmn->htbls_kmem_cache, GFP_KERNEL); in mlx5dr_icm_pool_alloc_htbl()
500 void mlx5dr_icm_pool_free_htbl(struct mlx5dr_icm_pool *pool, struct mlx5dr_ste_htbl *htbl) in mlx5dr_icm_pool_free_htbl() argument
502 kmem_cache_free(pool->dmn->htbls_kmem_cache, htbl); in mlx5dr_icm_pool_free_htbl()
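mlx5dr_icm_pool_alloc_htbl() and mlx5dr_icm_pool_free_htbl() are thin wrappers around the domain's htbls kmem cache, so hash-table objects come from a per-domain slab cache rather than plain allocations. A hedged pairing sketch, using exactly the signatures shown above and assuming driver context (error handling trimmed):

/* Hedged pairing sketch; driver context assumed, not standalone code. */
static int htbl_cache_example(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_ste_htbl *htbl = mlx5dr_icm_pool_alloc_htbl(pool);

	if (!htbl)
		return -ENOMEM;
	/* ... fill and use the hash table ... */
	mlx5dr_icm_pool_free_htbl(pool, htbl);
	return 0;
}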
509 struct mlx5dr_icm_pool *pool; in mlx5dr_icm_pool_create() local
512 pool = kvzalloc(sizeof(*pool), GFP_KERNEL); in mlx5dr_icm_pool_create()
513 if (!pool) in mlx5dr_icm_pool_create()
516 pool->dmn = dmn; in mlx5dr_icm_pool_create()
517 pool->icm_type = icm_type; in mlx5dr_icm_pool_create()
518 pool->chunks_kmem_cache = dmn->chunks_kmem_cache; in mlx5dr_icm_pool_create()
520 INIT_LIST_HEAD(&pool->buddy_mem_list); in mlx5dr_icm_pool_create()
521 mutex_init(&pool->mutex); in mlx5dr_icm_pool_create()
525 pool->max_log_chunk_sz = dmn->info.max_log_sw_icm_sz; in mlx5dr_icm_pool_create()
526 max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in mlx5dr_icm_pool_create()
527 pool->icm_type) * in mlx5dr_icm_pool_create()
531 pool->max_log_chunk_sz = dmn->info.max_log_action_icm_sz; in mlx5dr_icm_pool_create()
532 max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in mlx5dr_icm_pool_create()
533 pool->icm_type) * in mlx5dr_icm_pool_create()
537 pool->max_log_chunk_sz = dmn->info.max_log_modify_hdr_pattern_icm_sz; in mlx5dr_icm_pool_create()
538 max_hot_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz, in mlx5dr_icm_pool_create()
539 pool->icm_type) * in mlx5dr_icm_pool_create()
546 entry_size = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type); in mlx5dr_icm_pool_create()
549 pool->th = max_hot_size; in mlx5dr_icm_pool_create()
551 pool->hot_chunks_arr = kvcalloc(num_of_chunks, in mlx5dr_icm_pool_create()
554 if (!pool->hot_chunks_arr) in mlx5dr_icm_pool_create()
557 return pool; in mlx5dr_icm_pool_create()
560 kvfree(pool); in mlx5dr_icm_pool_create()
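mlx5dr_icm_pool_create() selects max_log_chunk_sz per ICM type from the domain capabilities (max_log_sw_icm_sz, max_log_action_icm_sz, max_log_modify_hdr_pattern_icm_sz), derives max_hot_size from the byte size of one maximal chunk times a scaling factor that is cut off in these matches, sets pool->th from it, and allocates hot_chunks_arr with kvcalloc(). Assuming the usual convention that a chunk size is the log2 of its entry count, the byte conversion looks like the standalone sketch below; the numbers are placeholders, not real domain caps.

/* Standalone sketch of the sizing math, assuming chunk sizes are log2 entry
 * counts; the entry size and log size below are placeholders. */
#include <stdint.h>
#include <stdio.h>

static uint64_t chunk_size_to_entries(int log_chunk_sz)
{
	return (uint64_t)1 << log_chunk_sz;
}

static uint64_t chunk_size_to_byte(int log_chunk_sz, uint32_t entry_size)
{
	return chunk_size_to_entries(log_chunk_sz) * entry_size;
}

int main(void)
{
	int max_log_chunk_sz = 14;   /* e.g. 2^14 entries per maximal chunk */
	uint32_t entry_size = 64;    /* placeholder bytes per entry */
	uint64_t max_chunk_bytes = chunk_size_to_byte(max_log_chunk_sz, entry_size);

	/* pool->th and the hot_chunks_arr length are both derived from this */
	printf("one maximal chunk = %llu bytes\n",
	       (unsigned long long)max_chunk_bytes);
	return 0;
}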
564 void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool) in mlx5dr_icm_pool_destroy() argument
568 dr_icm_pool_clear_hot_chunks_arr(pool); in mlx5dr_icm_pool_destroy()
570 list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) in mlx5dr_icm_pool_destroy()
573 kvfree(pool->hot_chunks_arr); in mlx5dr_icm_pool_destroy()
574 mutex_destroy(&pool->mutex); in mlx5dr_icm_pool_destroy()
575 kvfree(pool); in mlx5dr_icm_pool_destroy()
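Taken together, the matches outline the pool's external lifecycle: mlx5dr_icm_pool_create() per domain and ICM type, mlx5dr_icm_alloc_chunk()/mlx5dr_icm_free_chunk() for individual chunks, and mlx5dr_icm_pool_destroy() at teardown (which drains the hot array and destroys any remaining buddies). The caller-side sketch below is hedged: it only compiles inside the driver tree, and the chunk/enum type names are assumed from memory of the driver headers rather than confirmed by these matches.

/* Hedged usage sketch (driver context; struct/enum names assumed). */
static int icm_pool_example(struct mlx5dr_domain *dmn,
			    enum mlx5dr_icm_chunk_size sz)
{
	struct mlx5dr_icm_pool *pool;
	struct mlx5dr_icm_chunk *chunk;

	pool = mlx5dr_icm_pool_create(dmn, DR_ICM_TYPE_STE);
	if (!pool)
		return -ENOMEM;

	/* sz must not exceed pool->max_log_chunk_sz, per the alloc path above */
	chunk = mlx5dr_icm_alloc_chunk(pool, sz);
	if (!chunk) {
		mlx5dr_icm_pool_destroy(pool);
		return -ENOMEM;
	}

	mlx5dr_icm_free_chunk(chunk); /* deferred: parked as hot until the next sync */
	mlx5dr_icm_pool_destroy(pool);
	return 0;
}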