// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2019 Mellanox Technologies. */

#include "dr_types.h"

#define DR_ICM_MODIFY_HDR_ALIGN_BASE 64

struct mlx5dr_icm_pool {
	enum mlx5dr_icm_type icm_type;
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_domain *dmn;
	/* memory management */
	struct mutex mutex; /* protect the ICM pool and ICM buddy */
	struct list_head buddy_mem_list;
	u64 hot_memory_size;
};

struct mlx5dr_icm_dm {
	u32 obj_id;
	enum mlx5_sw_icm_type type;
	phys_addr_t addr;
	size_t length;
};

struct mlx5dr_icm_mr {
	u32 mkey;
	struct mlx5dr_icm_dm dm;
	struct mlx5dr_domain *dmn;
	size_t length;
	u64 icm_start_addr;
};

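/* Register the allocated SW ICM range as an mkey; the key is later
 * exposed through mlx5dr_icm_pool_get_chunk_rkey() and used as the
 * RDMA rkey when the SW steering QP writes STEs/actions into ICM.
 */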
static int dr_icm_create_dm_mkey(struct mlx5_core_dev *mdev,
				 u32 pd, u64 length, u64 start_addr, int mode,
				 u32 *mkey)
{
	u32 inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	u32 in[MLX5_ST_SZ_DW(create_mkey_in)] = {};
	void *mkc;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, mode);
	MLX5_SET(mkc, mkc, access_mode_4_2, (mode >> 2) & 0x7);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	if (mode == MLX5_MKC_ACCESS_MODE_SW_ICM) {
		MLX5_SET(mkc, mkc, rw, 1);
		MLX5_SET(mkc, mkc, rr, 1);
	}

	MLX5_SET64(mkc, mkc, len, length);
	MLX5_SET(mkc, mkc, pd, pd);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET64(mkc, mkc, start_addr, start_addr);

	return mlx5_core_create_mkey(mdev, mkey, in, inlen);
}

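/* The chunk attributes below are derived on demand from the owning
 * buddy allocator and pool, rather than being cached in each chunk.
 */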
u64 mlx5dr_icm_pool_get_chunk_mr_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 offset = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)offset * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_rkey(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->icm_mr->mkey;
}

u64 mlx5dr_icm_pool_get_chunk_icm_addr(struct mlx5dr_icm_chunk *chunk)
{
	u32 size = mlx5dr_icm_pool_dm_type_to_entry_size(chunk->buddy_mem->pool->icm_type);

	return (u64)chunk->buddy_mem->icm_mr->icm_start_addr + size * chunk->seg;
}

u32 mlx5dr_icm_pool_get_chunk_byte_size(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_byte(chunk->size,
			chunk->buddy_mem->pool->icm_type);
}

u32 mlx5dr_icm_pool_get_chunk_num_of_entries(struct mlx5dr_icm_chunk *chunk)
{
	return mlx5dr_icm_pool_chunk_size_to_entries(chunk->size);
}

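/* Allocate one SW ICM block of the pool's maximum chunk size and register
 * it as device memory. STE ICM is requested with alignment equal to the
 * block size, while modify-header ICM only needs 64B alignment.
 */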
static struct mlx5dr_icm_mr *
dr_icm_pool_mr_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5_core_dev *mdev = pool->dmn->mdev;
	enum mlx5_sw_icm_type dm_type;
	struct mlx5dr_icm_mr *icm_mr;
	size_t log_align_base;
	int err;

	icm_mr = kvzalloc(sizeof(*icm_mr), GFP_KERNEL);
	if (!icm_mr)
		return NULL;

	icm_mr->dmn = pool->dmn;

	icm_mr->dm.length = mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
							       pool->icm_type);

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		dm_type = MLX5_SW_ICM_TYPE_STEERING;
		log_align_base = ilog2(icm_mr->dm.length);
	} else {
		dm_type = MLX5_SW_ICM_TYPE_HEADER_MODIFY;
		/* Align base is 64B */
		log_align_base = ilog2(DR_ICM_MODIFY_HDR_ALIGN_BASE);
	}
	icm_mr->dm.type = dm_type;

	err = mlx5_dm_sw_icm_alloc(mdev, icm_mr->dm.type, icm_mr->dm.length,
				   log_align_base, 0, &icm_mr->dm.addr,
				   &icm_mr->dm.obj_id);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to allocate SW ICM memory, err (%d)\n", err);
		goto free_icm_mr;
	}

	/* Register device memory */
	err = dr_icm_create_dm_mkey(mdev, pool->dmn->pdn,
				    icm_mr->dm.length,
				    icm_mr->dm.addr,
				    MLX5_MKC_ACCESS_MODE_SW_ICM,
				    &icm_mr->mkey);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to create SW ICM MKEY, err (%d)\n", err);
		goto free_dm;
	}

	icm_mr->icm_start_addr = icm_mr->dm.addr;

	if (icm_mr->icm_start_addr & (BIT(log_align_base) - 1)) {
		mlx5dr_err(pool->dmn, "Failed to get aligned ICM mem (requested align log base: %zu)\n",
			   log_align_base);
		goto free_mkey;
	}

	return icm_mr;

free_mkey:
	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
free_dm:
	mlx5_dm_sw_icm_dealloc(mdev, icm_mr->dm.type, icm_mr->dm.length, 0,
			       icm_mr->dm.addr, icm_mr->dm.obj_id);
free_icm_mr:
	kvfree(icm_mr);
	return NULL;
}

static void dr_icm_pool_mr_destroy(struct mlx5dr_icm_mr *icm_mr)
{
	struct mlx5_core_dev *mdev = icm_mr->dmn->mdev;
	struct mlx5dr_icm_dm *dm = &icm_mr->dm;

	mlx5_core_destroy_mkey(mdev, icm_mr->mkey);
	mlx5_dm_sw_icm_dealloc(mdev, dm->type, dm->length, 0,
			       dm->addr, dm->obj_id);
	kvfree(icm_mr);
}

static int dr_icm_buddy_get_ste_size(struct mlx5dr_icm_buddy_mem *buddy)
{
	/* We support only one STE size, for both ConnectX-5 and later
	 * devices. Once support is added for match STEs with a larger tag
	 * (32B instead of 16B), the STE size for devices newer than
	 * ConnectX-5 will need to account for that.
	 */
	return DR_STE_SIZE_REDUCED;
}

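/* Point the chunk at its slice of the buddy's preallocated STE cache:
 * SW STEs, miss lists and HW STEs are all indexed by the chunk's offset
 * within the buddy memory.
 */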
static void dr_icm_chunk_ste_init(struct mlx5dr_icm_chunk *chunk, int offset)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	int index = offset / DR_STE_SIZE;

	chunk->ste_arr = &buddy->ste_arr[index];
	chunk->miss_list = &buddy->miss_list[index];
	chunk->hw_ste_arr = buddy->hw_ste_arr +
			    index * dr_icm_buddy_get_ste_size(buddy);
}

static void dr_icm_chunk_ste_cleanup(struct mlx5dr_icm_chunk *chunk)
{
	int num_of_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;

	memset(chunk->hw_ste_arr, 0,
	       num_of_entries * dr_icm_buddy_get_ste_size(buddy));
	memset(chunk->ste_arr, 0,
	       num_of_entries * sizeof(chunk->ste_arr[0]));
}

static enum mlx5dr_icm_type
get_chunk_icm_type(struct mlx5dr_icm_chunk *chunk)
{
	return chunk->buddy_mem->pool->icm_type;
}

static void dr_icm_chunk_destroy(struct mlx5dr_icm_chunk *chunk,
				 struct mlx5dr_icm_buddy_mem *buddy)
{
	enum mlx5dr_icm_type icm_type = get_chunk_icm_type(chunk);

	buddy->used_memory -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
	list_del(&chunk->chunk_list);

	if (icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_cleanup(chunk);

	kvfree(chunk);
}

static int dr_icm_buddy_init_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	int num_of_entries =
		mlx5dr_icm_pool_chunk_size_to_entries(buddy->pool->max_log_chunk_sz);

	buddy->ste_arr = kvcalloc(num_of_entries,
				  sizeof(struct mlx5dr_ste), GFP_KERNEL);
	if (!buddy->ste_arr)
		return -ENOMEM;

	/* Preallocate full STE size on non-ConnectX-5 devices since
	 * we need to support both full and reduced with the same cache.
	 */
	buddy->hw_ste_arr = kvcalloc(num_of_entries,
				     dr_icm_buddy_get_ste_size(buddy), GFP_KERNEL);
	if (!buddy->hw_ste_arr)
		goto free_ste_arr;

	buddy->miss_list = kvmalloc(num_of_entries * sizeof(struct list_head), GFP_KERNEL);
	if (!buddy->miss_list)
		goto free_hw_ste_arr;

	return 0;

free_hw_ste_arr:
	kvfree(buddy->hw_ste_arr);
free_ste_arr:
	kvfree(buddy->ste_arr);
	return -ENOMEM;
}

static void dr_icm_buddy_cleanup_ste_cache(struct mlx5dr_icm_buddy_mem *buddy)
{
	kvfree(buddy->ste_arr);
	kvfree(buddy->hw_ste_arr);
	kvfree(buddy->miss_list);
}

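/* Create a new buddy allocator backed by a single ICM MR of the pool's
 * maximum chunk size. For STE ICM the per-buddy STE cache is preallocated
 * as well.
 */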
static int dr_icm_buddy_create(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy;
	struct mlx5dr_icm_mr *icm_mr;

	icm_mr = dr_icm_pool_mr_create(pool);
	if (!icm_mr)
		return -ENOMEM;

	buddy = kvzalloc(sizeof(*buddy), GFP_KERNEL);
	if (!buddy)
		goto free_mr;

	if (mlx5dr_buddy_init(buddy, pool->max_log_chunk_sz))
		goto err_free_buddy;

	buddy->icm_mr = icm_mr;
	buddy->pool = pool;

	if (pool->icm_type == DR_ICM_TYPE_STE) {
		/* Reduce allocations by preallocating and reusing the STE structures */
		if (dr_icm_buddy_init_ste_cache(buddy))
			goto err_cleanup_buddy;
	}

	/* add it to the -start- of the list in order to search in it first */
	list_add(&buddy->list_node, &pool->buddy_mem_list);

	return 0;

err_cleanup_buddy:
	mlx5dr_buddy_cleanup(buddy);
err_free_buddy:
	kvfree(buddy);
free_mr:
	dr_icm_pool_mr_destroy(icm_mr);
	return -ENOMEM;
}

static void dr_icm_buddy_destroy(struct mlx5dr_icm_buddy_mem *buddy)
{
	struct mlx5dr_icm_chunk *chunk, *next;

	list_for_each_entry_safe(chunk, next, &buddy->hot_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	list_for_each_entry_safe(chunk, next, &buddy->used_list, chunk_list)
		dr_icm_chunk_destroy(chunk, buddy);

	dr_icm_pool_mr_destroy(buddy->icm_mr);

	mlx5dr_buddy_cleanup(buddy);

	if (buddy->pool->icm_type == DR_ICM_TYPE_STE)
		dr_icm_buddy_cleanup_ste_cache(buddy);

	kvfree(buddy);
}

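/* Carve a chunk of 'chunk_size' out of the given buddy memory, at the
 * segment handed out by the buddy allocator, and add it to the buddy's
 * used list.
 */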
static struct mlx5dr_icm_chunk *
dr_icm_chunk_create(struct mlx5dr_icm_pool *pool,
		    enum mlx5dr_icm_chunk_size chunk_size,
		    struct mlx5dr_icm_buddy_mem *buddy_mem_pool,
		    unsigned int seg)
{
	struct mlx5dr_icm_chunk *chunk;
	int offset;

	chunk = kvzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return NULL;

	offset = mlx5dr_icm_pool_dm_type_to_entry_size(pool->icm_type) * seg;

	chunk->seg = seg;
	chunk->size = chunk_size;
	chunk->buddy_mem = buddy_mem_pool;

	if (pool->icm_type == DR_ICM_TYPE_STE)
		dr_icm_chunk_ste_init(chunk, offset);

	buddy_mem_pool->used_memory += mlx5dr_icm_pool_get_chunk_byte_size(chunk);
	INIT_LIST_HEAD(&chunk->chunk_list);

	/* chunk now is part of the used_list */
	list_add_tail(&chunk->chunk_list, &buddy_mem_pool->used_list);

	return chunk;
}

static bool dr_icm_pool_is_sync_required(struct mlx5dr_icm_pool *pool)
{
	int allow_hot_size;

	/* sync when hot memory reaches half of the pool size */
	allow_hot_size =
		mlx5dr_icm_pool_chunk_size_to_byte(pool->max_log_chunk_sz,
						   pool->icm_type) / 2;

	return pool->hot_memory_size > allow_hot_size;
}

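/* Sync steering to HW, then return all hot chunks to their buddy
 * allocators. STE buddies that end up completely unused are destroyed.
 */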
static int dr_icm_pool_sync_all_buddy_pools(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;
	u32 num_entries;
	int err;

	err = mlx5dr_cmd_sync_steering(pool->dmn->mdev);
	if (err) {
		mlx5dr_err(pool->dmn, "Failed to sync to HW (err: %d)\n", err);
		return err;
	}

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node) {
		struct mlx5dr_icm_chunk *chunk, *tmp_chunk;

		list_for_each_entry_safe(chunk, tmp_chunk, &buddy->hot_list, chunk_list) {
			num_entries = mlx5dr_icm_pool_get_chunk_num_of_entries(chunk);
			mlx5dr_buddy_free_mem(buddy, chunk->seg, ilog2(num_entries));
			pool->hot_memory_size -= mlx5dr_icm_pool_get_chunk_byte_size(chunk);
			dr_icm_chunk_destroy(chunk, buddy);
		}

		if (!buddy->used_memory && pool->icm_type == DR_ICM_TYPE_STE)
			dr_icm_buddy_destroy(buddy);
	}

	return 0;
}

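/* Find a buddy memory with a free segment of the requested chunk size.
 * If none of the existing buddies has room, create a new buddy and retry.
 */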
static int dr_icm_handle_buddies_get_mem(struct mlx5dr_icm_pool *pool,
					 enum mlx5dr_icm_chunk_size chunk_size,
					 struct mlx5dr_icm_buddy_mem **buddy,
					 unsigned int *seg)
{
	struct mlx5dr_icm_buddy_mem *buddy_mem_pool;
	bool new_mem = false;
	int err;

alloc_buddy_mem:
	/* find the next free place from the buddy list */
	list_for_each_entry(buddy_mem_pool, &pool->buddy_mem_list, list_node) {
		err = mlx5dr_buddy_alloc_mem(buddy_mem_pool,
					     chunk_size, seg);
		if (!err)
			goto found;

		if (WARN_ON(new_mem)) {
			/* We have new memory pool, first in the list */
			mlx5dr_err(pool->dmn,
				   "No memory for order: %d\n",
				   chunk_size);
			goto out;
		}
	}

	/* no more available allocators in that pool, create new */
	err = dr_icm_buddy_create(pool);
	if (err) {
		mlx5dr_err(pool->dmn,
			   "Failed creating buddy for order %d\n",
			   chunk_size);
		goto out;
	}

	/* mark we have new memory, first in list */
	new_mem = true;
	goto alloc_buddy_mem;

found:
	*buddy = buddy_mem_pool;
out:
	return err;
}

/* Allocate an ICM chunk, each chunk holds a piece of ICM memory and
 * also memory used for HW STE management for optimizations.
 */
struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
		       enum mlx5dr_icm_chunk_size chunk_size)
{
	struct mlx5dr_icm_chunk *chunk = NULL;
	struct mlx5dr_icm_buddy_mem *buddy;
	unsigned int seg;
	int ret;

	if (chunk_size > pool->max_log_chunk_sz)
		return NULL;

	mutex_lock(&pool->mutex);
	/* find mem, get back the relevant buddy pool and seg in that mem */
	ret = dr_icm_handle_buddies_get_mem(pool, chunk_size, &buddy, &seg);
	if (ret)
		goto out;

	chunk = dr_icm_chunk_create(pool, chunk_size, buddy, seg);
	if (!chunk)
		goto out_err;

	goto out;

out_err:
	mlx5dr_buddy_free_mem(buddy, seg, chunk_size);
out:
	mutex_unlock(&pool->mutex);
	return chunk;
}

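/* Chunks are not returned to the buddy allocator directly; they are
 * parked on the buddy's "hot" list and reclaimed by
 * dr_icm_pool_sync_all_buddy_pools() once enough hot memory accumulates.
 */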
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk)
{
	struct mlx5dr_icm_buddy_mem *buddy = chunk->buddy_mem;
	struct mlx5dr_icm_pool *pool = buddy->pool;

	/* move the memory to the waiting list AKA "hot" */
	mutex_lock(&pool->mutex);
	list_move_tail(&chunk->chunk_list, &buddy->hot_list);
	pool->hot_memory_size += mlx5dr_icm_pool_get_chunk_byte_size(chunk);

	/* Check if we have chunks that are waiting for sync-ste */
	if (dr_icm_pool_is_sync_required(pool))
		dr_icm_pool_sync_all_buddy_pools(pool);

	mutex_unlock(&pool->mutex);
}

struct mlx5dr_icm_pool *mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn,
					       enum mlx5dr_icm_type icm_type)
{
	enum mlx5dr_icm_chunk_size max_log_chunk_sz;
	struct mlx5dr_icm_pool *pool;

	if (icm_type == DR_ICM_TYPE_STE)
		max_log_chunk_sz = dmn->info.max_log_sw_icm_sz;
	else
		max_log_chunk_sz = dmn->info.max_log_action_icm_sz;

	pool = kvzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->dmn = dmn;
	pool->icm_type = icm_type;
	pool->max_log_chunk_sz = max_log_chunk_sz;

	INIT_LIST_HEAD(&pool->buddy_mem_list);

	mutex_init(&pool->mutex);

	return pool;
}

void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool)
{
	struct mlx5dr_icm_buddy_mem *buddy, *tmp_buddy;

	list_for_each_entry_safe(buddy, tmp_buddy, &pool->buddy_mem_list, list_node)
		dr_icm_buddy_destroy(buddy);

	mutex_destroy(&pool->mutex);
	kvfree(pool);
}