129cf8febSAlex Vesker // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
229cf8febSAlex Vesker /* Copyright (c) 2019 Mellanox Technologies. */
329cf8febSAlex Vesker 
429cf8febSAlex Vesker #include "dr_types.h"
529cf8febSAlex Vesker 
629cf8febSAlex Vesker #define DR_ICM_MODIFY_HDR_ALIGN_BASE 64
729cf8febSAlex Vesker #define DR_ICM_SYNC_THRESHOLD (64 * 1024 * 1024)
829cf8febSAlex Vesker 
929cf8febSAlex Vesker struct mlx5dr_icm_pool;
1029cf8febSAlex Vesker 
struct mlx5dr_icm_bucket {
	/* Owning pool (back-pointer) */
	struct mlx5dr_icm_pool *pool;

	/* Chunks that are not visible to HW: not handed out and not in cache */
	struct list_head free_list;
	unsigned int free_list_count;

	/* Used chunks, HW may be accessing this memory */
	struct list_head used_list;
	unsigned int used_list_count;

	/* HW may be accessing this memory but at some future,
	 * undetermined time, it might cease to do so. Before deciding to call
	 * sync_ste, this list is moved to sync_list
	 */
	struct list_head hot_list;
	unsigned int hot_list_count;

	/* Pending sync list, entries from the hot list are moved to this list.
	 * sync_ste is executed and then sync_list is concatenated to the free list
	 */
	struct list_head sync_list;
	unsigned int sync_list_count;

	/* Chunks across all of this bucket's lists */
	u32 total_chunks;
	/* Entries per chunk for this bucket's chunk-size order */
	u32 num_of_entries;
	/* Bytes per entry (STE size or modify-action size, set at init) */
	u32 entry_size;
	/* protect the ICM bucket */
	struct mutex mutex;
};
4129cf8febSAlex Vesker 
struct mlx5dr_icm_pool {
	/* Array of buckets, one per chunk-size order (num_of_buckets entries) */
	struct mlx5dr_icm_bucket *buckets;
	enum mlx5dr_icm_type icm_type;
	/* Largest chunk-size order this pool can serve */
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	/* Equals max_log_chunk_sz + 1 (one bucket per order, including 0) */
	enum mlx5dr_icm_chunk_size num_of_buckets;
	/* Device-memory MRs backing the chunks of this pool */
	struct list_head icm_mr_list;
	/* protect the ICM MR list */
	struct mutex mr_mutex;
	struct mlx5dr_domain *dmn;
};
5229cf8febSAlex Vesker 
/* One SW ICM device-memory allocation, as returned by mlx5_dm_sw_icm_alloc() */
struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	/* Device address of the allocated ICM region */
	u64 addr;
	/* Size of the region in bytes */
	size_t length;
};
5929cf8febSAlex Vesker 
/* A registered MR over one SW ICM region; chunks are carved from it linearly */
struct mlx5dr_icm_mr {
	struct mlx5dr_icm_pool *pool;
	struct mlx5_core_mkey mkey;
	struct mlx5dr_icm_dm dm;
	/* Bytes already consumed from the region (includes alignment padding) */
	size_t used_length;
	size_t length;
	/* ICM address chunks are offset from; equals dm.addr */
	u64 icm_start_addr;
	/* Membership in pool->icm_mr_list */
	struct list_head mr_list;
};
6929cf8febSAlex Vesker 
7029cf8febSAlex Vesker static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
7129cf8febSAlex Vesker 				 u32 pd, u64 length, u64 start_addr, int mode,
7229cf8febSAlex Vesker 				 struct mlx5_core_mkey *mkey)
7329cf8febSAlex Vesker {
7429cf8febSAlex Vesker 	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
7529cf8febSAlex Vesker 	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
7629cf8febSAlex Vesker 	void *mkc;
7729cf8febSAlex Vesker 
7829cf8febSAlex Vesker 	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
7929cf8febSAlex Vesker 
8029cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
8129cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
8229cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, lw, 1);
8329cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, lr, 1);
8429cf8febSAlex Vesker 	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
8529cf8febSAlex Vesker 		MLX5_SET(mkc, mkc, rw, 1);
8629cf8febSAlex Vesker 		MLX5_SET(mkc, mkc, rr, 1);
8729cf8febSAlex Vesker 	}
8829cf8febSAlex Vesker 
8929cf8febSAlex Vesker 	MLX5_SET64(mkc, mkc, len, length);
9029cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, pd, pd);
9129cf8febSAlex Vesker 	MLX5_SET(mkc, mkc, qpn, 0xffffff);
9229cf8febSAlex Vesker 	MLX5_SET64(mkc, mkc, start_addr, start_addr);
9329cf8febSAlex Vesker 
9429cf8febSAlex Vesker 	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
9529cf8febSAlex Vesker }
9629cf8febSAlex Vesker 
/* Allocate a new SW ICM region from the device, register it as an MR and
 * append it to the pool's MR list.
 * @type: SW ICM memory type (steering STEs or header-modify actions).
 * @align_base: required alignment of the first usable ICM address; any
 * misaligned prefix of the region is skipped by pre-advancing used_length.
 * Returns the new MR, or NULL on failure.
 */
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool,
		      enum mlx5_sw_icm_type type,
		      size_t align_base)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	struct mlx5dr_icm_mr *icm_mr;
	size_t align_diff;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->pool = pool;
	INIT_LIST_HEAD(&icm_mr->mr_list);

	icm_mr->dm.type = type;

	/* 2^log_biggest_table * entry-size * double-for-alignment */
	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type) * 2;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
				   &icm_mr->dm.addr, &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	/* Consume the bytes below the first align_base boundary so every
	 * chunk later carved from this MR starts at an aligned ICM address
	 * (the doubled dm.length above leaves room for this padding).
	 */
	align_diff = icm_mr->icm_start_addr % align_base;
	if (align_diff)
		icm_mr->used_length = align_base - align_diff;

	list_add_tail(&icm_mr->mr_list, &pool->icm_mr_list);

	return icm_mr;

free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}
15529cf8febSAlex Vesker 
15629cf8febSAlex Vesker static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
15729cf8febSAlex Vesker {
15829cf8febSAlex Vesker 	struct mlx5_core_dev *mdev = icm_mr->pool->dmn->mdev;
15929cf8febSAlex Vesker 	struct mlx5dr_icm_dm *dm = &icm_mr->dm;
16029cf8febSAlex Vesker 
16129cf8febSAlex Vesker 	list_del(&icm_mr->mr_list);
16229cf8febSAlex Vesker 	mlx5_core_destroy_mkey(mdev, &icm_mr->mkey);
16329cf8febSAlex Vesker 	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
16429cf8febSAlex Vesker 			       dm->addr, dm->obj_id);
16529cf8febSAlex Vesker 	kvfree(icm_mr);
16629cf8febSAlex Vesker }
16729cf8febSAlex Vesker 
16829cf8febSAlex Vesker static int dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk)
16929cf8febSAlex Vesker {
17029cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
17129cf8febSAlex Vesker 
17229cf8febSAlex Vesker 	chunk->ste_arr = kvzalloc(bucket->num_of_entries *
17329cf8febSAlex Vesker 				  sizeof(chunk->ste_arr[0]), GFP_KERNEL);
17429cf8febSAlex Vesker 	if (!chunk->ste_arr)
17529cf8febSAlex Vesker 		return -ENOMEM;
17629cf8febSAlex Vesker 
17729cf8febSAlex Vesker 	chunk->hw_ste_arr = kvzalloc(bucket->num_of_entries *
17829cf8febSAlex Vesker 				     DR_STE_SIZE_REDUCED, GFP_KERNEL);
17929cf8febSAlex Vesker 	if (!chunk->hw_ste_arr)
18029cf8febSAlex Vesker 		goto out_free_ste_arr;
18129cf8febSAlex Vesker 
18229cf8febSAlex Vesker 	chunk->miss_list = kvmalloc(bucket->num_of_entries *
18329cf8febSAlex Vesker 				    sizeof(chunk->miss_list[0]), GFP_KERNEL);
18429cf8febSAlex Vesker 	if (!chunk->miss_list)
18529cf8febSAlex Vesker 		goto out_free_hw_ste_arr;
18629cf8febSAlex Vesker 
18729cf8febSAlex Vesker 	return 0;
18829cf8febSAlex Vesker 
18929cf8febSAlex Vesker out_free_hw_ste_arr:
19029cf8febSAlex Vesker 	kvfree(chunk->hw_ste_arr);
19129cf8febSAlex Vesker out_free_ste_arr:
19229cf8febSAlex Vesker 	kvfree(chunk->ste_arr);
19329cf8febSAlex Vesker 	return -ENOMEM;
19429cf8febSAlex Vesker }
19529cf8febSAlex Vesker 
/* Refill @bucket's free list with a full "row" of chunks carved from a
 * pool MR, creating a new MR if the most recent one lacks space.
 * Takes pool->mr_mutex for the MR handling; the caller holds the bucket
 * mutex. Returns 0 on success or a negative errno.
 */
static int dr_icm_chunks_create(struct mlx5dr_icm_bucket *bucket)
{
	size_t mr_free_size, mr_req_size, mr_row_size;
	struct mlx5dr_icm_pool *pool = bucket->pool;
	struct mlx5dr_icm_mr *icm_mr = NULL;
	struct mlx5dr_icm_chunk *chunk;
	enum mlx5_sw_icm_type dm_type;
	size_t align_base;
	int i, err = 0;

	/* Bytes per chunk of this bucket, and bytes per full row (the
	 * biggest chunk size) which is the granularity MRs are consumed in.
	 */
	mr_req_size = bucket->num_of_entries * bucket->entry_size;
	mr_row_size = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							 pool->icm_type);

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		/* Align base is the biggest chunk size / row size */
		align_base = mr_row_size;
	} else {
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		align_base = DR_ICM_MODIFY_HDR_ALIGN_BASE;
	}

	mutex_lock(&pool->mr_mutex);
	/* Only the most recently created MR can have free space left */
	if (!list_empty(&pool->icm_mr_list)) {
		icm_mr = list_last_entry(&pool->icm_mr_list,
					 struct mlx5dr_icm_mr, mr_list);

		if (icm_mr)
			mr_free_size = icm_mr->dm.length - icm_mr->used_length;
	}

	/* mr_free_size is only read when icm_mr is non-NULL, i.e. when it
	 * was assigned above (short-circuit keeps this safe).
	 */
	if (!icm_mr || mr_free_size < mr_row_size) {
		icm_mr = dr_icm_pool_mr_create(pool, dm_type, align_base);
		if (!icm_mr) {
			err = -ENOMEM;
			goto out_err;
		}
	}

	/* Create memory aligned chunks */
	for (i = 0; i < mr_row_size / mr_req_size; i++) {
		chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
		if (!chunk) {
			err = -ENOMEM;
			goto out_err;
		}

		chunk->bucket = bucket;
		chunk->rkey = icm_mr->mkey.key;
		/* mr start addr is zero based */
		chunk->mr_addr = icm_mr->used_length;
		chunk->icm_addr = (uintptr_t)icm_mr->icm_start_addr + icm_mr->used_length;
		icm_mr->used_length += mr_req_size;
		chunk->num_of_entries = bucket->num_of_entries;
		chunk->byte_size = chunk->num_of_entries * bucket->entry_size;

		if (pool->icm_type == DR_ICM_TYPE_STE) {
			err = dr_icm_chunk_ste_init(chunk);
			if (err)
				goto out_free_chunk;
		}

		INIT_LIST_HEAD(&chunk->chunk_list);
		list_add(&chunk->chunk_list, &bucket->free_list);
		bucket->free_list_count++;
		bucket->total_chunks++;
	}
	mutex_unlock(&pool->mr_mutex);
	return 0;

out_free_chunk:
	kvfree(chunk);
out_err:
	/* Chunks created in earlier iterations stay on the free list */
	mutex_unlock(&pool->mr_mutex);
	return err;
}
27429cf8febSAlex Vesker 
27529cf8febSAlex Vesker static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
27629cf8febSAlex Vesker {
27729cf8febSAlex Vesker 	kvfree(chunk->miss_list);
27829cf8febSAlex Vesker 	kvfree(chunk->hw_ste_arr);
27929cf8febSAlex Vesker 	kvfree(chunk->ste_arr);
28029cf8febSAlex Vesker }
28129cf8febSAlex Vesker 
28229cf8febSAlex Vesker static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk)
28329cf8febSAlex Vesker {
28429cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
28529cf8febSAlex Vesker 
28629cf8febSAlex Vesker 	list_del(&chunk->chunk_list);
28729cf8febSAlex Vesker 	bucket->total_chunks--;
28829cf8febSAlex Vesker 
28929cf8febSAlex Vesker 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE)
29029cf8febSAlex Vesker 		dr_icm_chunk_ste_cleanup(chunk);
29129cf8febSAlex Vesker 
29229cf8febSAlex Vesker 	kvfree(chunk);
29329cf8febSAlex Vesker }
29429cf8febSAlex Vesker 
29529cf8febSAlex Vesker static void dr_icm_bucket_init(struct mlx5dr_icm_pool *pool,
29629cf8febSAlex Vesker 			       struct mlx5dr_icm_bucket *bucket,
29729cf8febSAlex Vesker 			       enum mlx5dr_icm_chunk_size chunk_size)
29829cf8febSAlex Vesker {
29929cf8febSAlex Vesker 	if (pool->icm_type == DR_ICM_TYPE_STE)
30029cf8febSAlex Vesker 		bucket->entry_size = DR_STE_SIZE;
30129cf8febSAlex Vesker 	else
30229cf8febSAlex Vesker 		bucket->entry_size = DR_MODIFY_ACTION_SIZE;
30329cf8febSAlex Vesker 
30429cf8febSAlex Vesker 	bucket->num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(chunk_size);
30529cf8febSAlex Vesker 	bucket->pool = pool;
30629cf8febSAlex Vesker 	mutex_init(&bucket->mutex);
30729cf8febSAlex Vesker 	INIT_LIST_HEAD(&bucket->free_list);
30829cf8febSAlex Vesker 	INIT_LIST_HEAD(&bucket->used_list);
30929cf8febSAlex Vesker 	INIT_LIST_HEAD(&bucket->hot_list);
31029cf8febSAlex Vesker 	INIT_LIST_HEAD(&bucket->sync_list);
31129cf8febSAlex Vesker }
31229cf8febSAlex Vesker 
31329cf8febSAlex Vesker static void dr_icm_bucket_cleanup(struct mlx5dr_icm_bucket *bucket)
31429cf8febSAlex Vesker {
31529cf8febSAlex Vesker 	struct mlx5dr_icm_chunk *chunk, *next;
31629cf8febSAlex Vesker 
31729cf8febSAlex Vesker 	mutex_destroy(&bucket->mutex);
31829cf8febSAlex Vesker 	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
31929cf8febSAlex Vesker 	list_splice_tail_init(&bucket->hot_list, &bucket->free_list);
32029cf8febSAlex Vesker 
32129cf8febSAlex Vesker 	list_for_each_entry_safe(chunk, next, &bucket->free_list, chunk_list)
32229cf8febSAlex Vesker 		dr_icm_chunk_destroy(chunk);
32329cf8febSAlex Vesker 
32429cf8febSAlex Vesker 	WARN_ON(bucket->total_chunks != 0);
32529cf8febSAlex Vesker 
32629cf8febSAlex Vesker 	/* Cleanup of unreturned chunks */
32729cf8febSAlex Vesker 	list_for_each_entry_safe(chunk, next, &bucket->used_list, chunk_list)
32829cf8febSAlex Vesker 		dr_icm_chunk_destroy(chunk);
32929cf8febSAlex Vesker }
33029cf8febSAlex Vesker 
33129cf8febSAlex Vesker static u64 dr_icm_hot_mem_size(struct mlx5dr_icm_pool *pool)
33229cf8febSAlex Vesker {
33329cf8febSAlex Vesker 	u64 hot_size = 0;
33429cf8febSAlex Vesker 	int chunk_order;
33529cf8febSAlex Vesker 
33629cf8febSAlex Vesker 	for (chunk_order = 0; chunk_order < pool->num_of_buckets; chunk_order++)
33729cf8febSAlex Vesker 		hot_size += pool->buckets[chunk_order].hot_list_count *
33829cf8febSAlex Vesker 			    mlx5dr_icm_pool_chunk_size_to_byte(chunk_order, pool->icm_type);
33929cf8febSAlex Vesker 
34029cf8febSAlex Vesker 	return hot_size;
34129cf8febSAlex Vesker }
34229cf8febSAlex Vesker 
34329cf8febSAlex Vesker static bool dr_icm_reuse_hot_entries(struct mlx5dr_icm_pool *pool,
34429cf8febSAlex Vesker 				     struct mlx5dr_icm_bucket *bucket)
34529cf8febSAlex Vesker {
34629cf8febSAlex Vesker 	u64 bytes_for_sync;
34729cf8febSAlex Vesker 
34829cf8febSAlex Vesker 	bytes_for_sync = dr_icm_hot_mem_size(pool);
34929cf8febSAlex Vesker 	if (bytes_for_sync < DR_ICM_SYNC_THRESHOLD || !bucket->hot_list_count)
35029cf8febSAlex Vesker 		return false;
35129cf8febSAlex Vesker 
35229cf8febSAlex Vesker 	return true;
35329cf8febSAlex Vesker }
35429cf8febSAlex Vesker 
35529cf8febSAlex Vesker static void dr_icm_chill_bucket_start(struct mlx5dr_icm_bucket *bucket)
35629cf8febSAlex Vesker {
35729cf8febSAlex Vesker 	list_splice_tail_init(&bucket->hot_list, &bucket->sync_list);
35829cf8febSAlex Vesker 	bucket->sync_list_count += bucket->hot_list_count;
35929cf8febSAlex Vesker 	bucket->hot_list_count = 0;
36029cf8febSAlex Vesker }
36129cf8febSAlex Vesker 
36229cf8febSAlex Vesker static void dr_icm_chill_bucket_end(struct mlx5dr_icm_bucket *bucket)
36329cf8febSAlex Vesker {
36429cf8febSAlex Vesker 	list_splice_tail_init(&bucket->sync_list, &bucket->free_list);
36529cf8febSAlex Vesker 	bucket->free_list_count += bucket->sync_list_count;
36629cf8febSAlex Vesker 	bucket->sync_list_count = 0;
36729cf8febSAlex Vesker }
36829cf8febSAlex Vesker 
36929cf8febSAlex Vesker static void dr_icm_chill_bucket_abort(struct mlx5dr_icm_bucket *bucket)
37029cf8febSAlex Vesker {
37129cf8febSAlex Vesker 	list_splice_tail_init(&bucket->sync_list, &bucket->hot_list);
37229cf8febSAlex Vesker 	bucket->hot_list_count += bucket->sync_list_count;
37329cf8febSAlex Vesker 	bucket->sync_list_count = 0;
37429cf8febSAlex Vesker }
37529cf8febSAlex Vesker 
37629cf8febSAlex Vesker static void dr_icm_chill_buckets_start(struct mlx5dr_icm_pool *pool,
37729cf8febSAlex Vesker 				       struct mlx5dr_icm_bucket *cb,
37829cf8febSAlex Vesker 				       bool buckets[DR_CHUNK_SIZE_MAX])
37929cf8febSAlex Vesker {
38029cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket;
38129cf8febSAlex Vesker 	int i;
38229cf8febSAlex Vesker 
38329cf8febSAlex Vesker 	for (i = 0; i < pool->num_of_buckets; i++) {
38429cf8febSAlex Vesker 		bucket = &pool->buckets[i];
38529cf8febSAlex Vesker 		if (bucket == cb) {
38629cf8febSAlex Vesker 			dr_icm_chill_bucket_start(bucket);
38729cf8febSAlex Vesker 			continue;
38829cf8febSAlex Vesker 		}
38929cf8febSAlex Vesker 
39029cf8febSAlex Vesker 		/* Freeing the mutex is done at the end of that process, after
39129cf8febSAlex Vesker 		 * sync_ste was executed at dr_icm_chill_buckets_end func.
39229cf8febSAlex Vesker 		 */
39329cf8febSAlex Vesker 		if (mutex_trylock(&bucket->mutex)) {
39429cf8febSAlex Vesker 			dr_icm_chill_bucket_start(bucket);
39529cf8febSAlex Vesker 			buckets[i] = true;
39629cf8febSAlex Vesker 		}
39729cf8febSAlex Vesker 	}
39829cf8febSAlex Vesker }
39929cf8febSAlex Vesker 
40029cf8febSAlex Vesker static void dr_icm_chill_buckets_end(struct mlx5dr_icm_pool *pool,
40129cf8febSAlex Vesker 				     struct mlx5dr_icm_bucket *cb,
40229cf8febSAlex Vesker 				     bool buckets[DR_CHUNK_SIZE_MAX])
40329cf8febSAlex Vesker {
40429cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket;
40529cf8febSAlex Vesker 	int i;
40629cf8febSAlex Vesker 
40729cf8febSAlex Vesker 	for (i = 0; i < pool->num_of_buckets; i++) {
40829cf8febSAlex Vesker 		bucket = &pool->buckets[i];
40929cf8febSAlex Vesker 		if (bucket == cb) {
41029cf8febSAlex Vesker 			dr_icm_chill_bucket_end(bucket);
41129cf8febSAlex Vesker 			continue;
41229cf8febSAlex Vesker 		}
41329cf8febSAlex Vesker 
41429cf8febSAlex Vesker 		if (!buckets[i])
41529cf8febSAlex Vesker 			continue;
41629cf8febSAlex Vesker 
41729cf8febSAlex Vesker 		dr_icm_chill_bucket_end(bucket);
41829cf8febSAlex Vesker 		mutex_unlock(&bucket->mutex);
41929cf8febSAlex Vesker 	}
42029cf8febSAlex Vesker }
42129cf8febSAlex Vesker 
42229cf8febSAlex Vesker static void dr_icm_chill_buckets_abort(struct mlx5dr_icm_pool *pool,
42329cf8febSAlex Vesker 				       struct mlx5dr_icm_bucket *cb,
42429cf8febSAlex Vesker 				       bool buckets[DR_CHUNK_SIZE_MAX])
42529cf8febSAlex Vesker {
42629cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket;
42729cf8febSAlex Vesker 	int i;
42829cf8febSAlex Vesker 
42929cf8febSAlex Vesker 	for (i = 0; i < pool->num_of_buckets; i++) {
43029cf8febSAlex Vesker 		bucket = &pool->buckets[i];
43129cf8febSAlex Vesker 		if (bucket == cb) {
43229cf8febSAlex Vesker 			dr_icm_chill_bucket_abort(bucket);
43329cf8febSAlex Vesker 			continue;
43429cf8febSAlex Vesker 		}
43529cf8febSAlex Vesker 
43629cf8febSAlex Vesker 		if (!buckets[i])
43729cf8febSAlex Vesker 			continue;
43829cf8febSAlex Vesker 
43929cf8febSAlex Vesker 		dr_icm_chill_bucket_abort(bucket);
44029cf8febSAlex Vesker 		mutex_unlock(&bucket->mutex);
44129cf8febSAlex Vesker 	}
44229cf8febSAlex Vesker }
44329cf8febSAlex Vesker 
44429cf8febSAlex Vesker /* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
44529cf8febSAlex Vesker  * also memory used for HW STE management for optimizations.
44629cf8febSAlex Vesker  */
44729cf8febSAlex Vesker struct mlx5dr_icm_chunk *
44829cf8febSAlex Vesker mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
44929cf8febSAlex Vesker 		       enum mlx5dr_icm_chunk_size chunk_size)
45029cf8febSAlex Vesker {
45129cf8febSAlex Vesker 	struct mlx5dr_icm_chunk *chunk = NULL; /* Fix compilation warning */
45229cf8febSAlex Vesker 	bool buckets[DR_CHUNK_SIZE_MAX] = {};
45329cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket;
45429cf8febSAlex Vesker 	int err;
45529cf8febSAlex Vesker 
45629cf8febSAlex Vesker 	if (chunk_size > pool->max_log_chunk_sz)
45729cf8febSAlex Vesker 		return NULL;
45829cf8febSAlex Vesker 
45929cf8febSAlex Vesker 	bucket = &pool->buckets[chunk_size];
46029cf8febSAlex Vesker 
46129cf8febSAlex Vesker 	mutex_lock(&bucket->mutex);
46229cf8febSAlex Vesker 
46329cf8febSAlex Vesker 	/* Take chunk from pool if available, otherwise allocate new chunks */
46429cf8febSAlex Vesker 	if (list_empty(&bucket->free_list)) {
46529cf8febSAlex Vesker 		if (dr_icm_reuse_hot_entries(pool, bucket)) {
46629cf8febSAlex Vesker 			dr_icm_chill_buckets_start(pool, bucket, buckets);
46729cf8febSAlex Vesker 			err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
46829cf8febSAlex Vesker 			if (err) {
46929cf8febSAlex Vesker 				dr_icm_chill_buckets_abort(pool, bucket, buckets);
47029cf8febSAlex Vesker 				mlx5dr_dbg(pool->dmn, "Sync_steering failed\n");
47129cf8febSAlex Vesker 				chunk = NULL;
47229cf8febSAlex Vesker 				goto out;
47329cf8febSAlex Vesker 			}
47429cf8febSAlex Vesker 			dr_icm_chill_buckets_end(pool, bucket, buckets);
47529cf8febSAlex Vesker 		} else {
47629cf8febSAlex Vesker 			dr_icm_chunks_create(bucket);
47729cf8febSAlex Vesker 		}
47829cf8febSAlex Vesker 	}
47929cf8febSAlex Vesker 
48029cf8febSAlex Vesker 	if (!list_empty(&bucket->free_list)) {
48129cf8febSAlex Vesker 		chunk = list_last_entry(&bucket->free_list,
48229cf8febSAlex Vesker 					struct mlx5dr_icm_chunk,
48329cf8febSAlex Vesker 					chunk_list);
48429cf8febSAlex Vesker 		if (chunk) {
48529cf8febSAlex Vesker 			list_del_init(&chunk->chunk_list);
48629cf8febSAlex Vesker 			list_add_tail(&chunk->chunk_list, &bucket->used_list);
48729cf8febSAlex Vesker 			bucket->free_list_count--;
48829cf8febSAlex Vesker 			bucket->used_list_count++;
48929cf8febSAlex Vesker 		}
49029cf8febSAlex Vesker 	}
49129cf8febSAlex Vesker out:
49229cf8febSAlex Vesker 	mutex_unlock(&bucket->mutex);
49329cf8febSAlex Vesker 	return chunk;
49429cf8febSAlex Vesker }
49529cf8febSAlex Vesker 
49629cf8febSAlex Vesker void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
49729cf8febSAlex Vesker {
49829cf8febSAlex Vesker 	struct mlx5dr_icm_bucket *bucket = chunk->bucket;
49929cf8febSAlex Vesker 
50029cf8febSAlex Vesker 	if (bucket->pool->icm_type == DR_ICM_TYPE_STE) {
50129cf8febSAlex Vesker 		memset(chunk->ste_arr, 0,
50229cf8febSAlex Vesker 		       bucket->num_of_entries * sizeof(chunk->ste_arr[0]));
50329cf8febSAlex Vesker 		memset(chunk->hw_ste_arr, 0,
50429cf8febSAlex Vesker 		       bucket->num_of_entries * DR_STE_SIZE_REDUCED);
50529cf8febSAlex Vesker 	}
50629cf8febSAlex Vesker 
50729cf8febSAlex Vesker 	mutex_lock(&bucket->mutex);
50829cf8febSAlex Vesker 	list_del_init(&chunk->chunk_list);
50929cf8febSAlex Vesker 	list_add_tail(&chunk->chunk_list, &bucket->hot_list);
51029cf8febSAlex Vesker 	bucket->hot_list_count++;
51129cf8febSAlex Vesker 	bucket->used_list_count--;
51229cf8febSAlex Vesker 	mutex_unlock(&bucket->mutex);
51329cf8febSAlex Vesker }
51429cf8febSAlex Vesker 
51529cf8febSAlex Vesker struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
51629cf8febSAlex Vesker 					       enum mlx5dr_icm_type icm_type)
51729cf8febSAlex Vesker {
51829cf8febSAlex Vesker 	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
51929cf8febSAlex Vesker 	struct mlx5dr_icm_pool *pool;
52029cf8febSAlex Vesker 	int i;
52129cf8febSAlex Vesker 
52229cf8febSAlex Vesker 	if (icm_type == DR_ICM_TYPE_STE)
52329cf8febSAlex Vesker 		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
52429cf8febSAlex Vesker 	else
52529cf8febSAlex Vesker 		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;
52629cf8febSAlex Vesker 
52729cf8febSAlex Vesker 	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
52829cf8febSAlex Vesker 	if (!pool)
52929cf8febSAlex Vesker 		return NULL;
53029cf8febSAlex Vesker 
53129cf8febSAlex Vesker 	pool->buckets = kcalloc(max_log_chunk_sz + 1,
53229cf8febSAlex Vesker 				sizeof(pool->buckets[0]),
53329cf8febSAlex Vesker 				GFP_KERNEL);
53429cf8febSAlex Vesker 	if (!pool->buckets)
53529cf8febSAlex Vesker 		goto free_pool;
53629cf8febSAlex Vesker 
53729cf8febSAlex Vesker 	pool->dmn = dmn;
53829cf8febSAlex Vesker 	pool->icm_type = icm_type;
53929cf8febSAlex Vesker 	pool->max_log_chunk_sz = max_log_chunk_sz;
54029cf8febSAlex Vesker 	pool->num_of_buckets = max_log_chunk_sz + 1;
54129cf8febSAlex Vesker 	INIT_LIST_HEAD(&pool->icm_mr_list);
54229cf8febSAlex Vesker 
54329cf8febSAlex Vesker 	for (i = 0; i < pool->num_of_buckets; i++)
54429cf8febSAlex Vesker 		dr_icm_bucket_init(pool, &pool->buckets[i], i);
54529cf8febSAlex Vesker 
54629cf8febSAlex Vesker 	mutex_init(&pool->mr_mutex);
54729cf8febSAlex Vesker 
54829cf8febSAlex Vesker 	return pool;
54929cf8febSAlex Vesker 
55029cf8febSAlex Vesker free_pool:
55129cf8febSAlex Vesker 	kvfree(pool);
55229cf8febSAlex Vesker 	return NULL;
55329cf8febSAlex Vesker }
55429cf8febSAlex Vesker 
55529cf8febSAlex Vesker void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
55629cf8febSAlex Vesker {
55729cf8febSAlex Vesker 	struct mlx5dr_icm_mr *icm_mr, *next;
55829cf8febSAlex Vesker 	int i;
55929cf8febSAlex Vesker 
56029cf8febSAlex Vesker 	mutex_destroy(&pool->mr_mutex);
56129cf8febSAlex Vesker 
56229cf8febSAlex Vesker 	list_for_each_entry_safe(icm_mr, next, &pool->icm_mr_list, mr_list)
56329cf8febSAlex Vesker 		dr_icm_pool_mr_destroy(icm_mr);
56429cf8febSAlex Vesker 
56529cf8febSAlex Vesker 	for (i = 0; i < pool->num_of_buckets; i++)
56629cf8febSAlex Vesker 		dr_icm_bucket_cleanup(&pool->buckets[i]);
56729cf8febSAlex Vesker 
56829cf8febSAlex Vesker 	kfree(pool->buckets);
56929cf8febSAlex Vesker 	kvfree(pool);
57029cf8febSAlex Vesker }
571