1 /*
2  * Copyright (c) 2016 Hisilicon Limited.
3  * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33 
34 #include "hns_roce_device.h"
35 #include "hns_roce_hem.h"
36 #include "hns_roce_common.h"
37 
38 #define HEM_INDEX_BUF			BIT(0)
39 #define HEM_INDEX_L0			BIT(1)
40 #define HEM_INDEX_L1			BIT(2)
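/*
 * Position of the HEM buffer and of the L0/L1 BAs that back one object in a
 * multi-hop table. The HEM_INDEX_* bits in 'inited' record which of these
 * entries were newly allocated (alloc path) or have become unused (free
 * path), so that free_mhop_hem() releases exactly those entries.
 */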
41 struct hns_roce_hem_index {
42 	u64 buf;
43 	u64 l0;
44 	u64 l1;
45 	u32 inited; /* indicates which of the indexes above are valid */
46 };
47 
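/*
 * Return true if the given context table type is configured to use
 * multi-hop addressing, i.e. its hop number capability is non-zero.
 */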
48 bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
49 {
50 	int hop_num = 0;
51 
52 	switch (type) {
53 	case HEM_TYPE_QPC:
54 		hop_num = hr_dev->caps.qpc_hop_num;
55 		break;
56 	case HEM_TYPE_MTPT:
57 		hop_num = hr_dev->caps.mpt_hop_num;
58 		break;
59 	case HEM_TYPE_CQC:
60 		hop_num = hr_dev->caps.cqc_hop_num;
61 		break;
62 	case HEM_TYPE_SRQC:
63 		hop_num = hr_dev->caps.srqc_hop_num;
64 		break;
65 	case HEM_TYPE_SCCC:
66 		hop_num = hr_dev->caps.sccc_hop_num;
67 		break;
68 	case HEM_TYPE_QPC_TIMER:
69 		hop_num = hr_dev->caps.qpc_timer_hop_num;
70 		break;
71 	case HEM_TYPE_CQC_TIMER:
72 		hop_num = hr_dev->caps.cqc_timer_hop_num;
73 		break;
74 	case HEM_TYPE_GMV:
75 		hop_num = hr_dev->caps.gmv_hop_num;
76 		break;
77 	default:
78 		return false;
79 	}
80 
81 	return hop_num ? true : false;
82 }
83 
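/*
 * Check whether all HEM entries other than @hem_idx that are covered by the
 * same BT chunk are NULL, i.e. whether the parent BT entry no longer points
 * at any in-use HEM and may be freed.
 */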
84 static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 hem_idx,
85 				    u32 bt_chunk_num, u64 hem_max_num)
86 {
87 	u64 start_idx = round_down(hem_idx, bt_chunk_num);
88 	u64 check_max_num = start_idx + bt_chunk_num;
89 	u64 i;
90 
91 	for (i = start_idx; (i < check_max_num) && (i < hem_max_num); i++)
92 		if (i != hem_idx && hem[i])
93 			return false;
94 
95 	return true;
96 }
97 
98 static bool hns_roce_check_bt_null(u64 **bt, u64 ba_idx, u32 bt_chunk_num)
99 {
100 	u64 start_idx = round_down(ba_idx, bt_chunk_num);
101 	int i;
102 
103 	for (i = 0; i < bt_chunk_num; i++)
104 		if (start_idx + i != ba_idx && bt[start_idx + i])
105 			return false;
106 
107 	return true;
108 }
109 
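/* Number of BA table levels (1-3) used by @table_type with @hop_num, or 0. */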
110 static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
111 {
112 	if (check_whether_bt_num_3(table_type, hop_num))
113 		return 3;
114 	else if (check_whether_bt_num_2(table_type, hop_num))
115 		return 2;
116 	else if (check_whether_bt_num_1(table_type, hop_num))
117 		return 1;
118 	else
119 		return 0;
120 }
121 
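/*
 * Fill @mhop with the buffer/BT chunk sizes, the number of L0 base addresses
 * and the hop number configured for the given table type.
 */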
122 static int get_hem_table_config(struct hns_roce_dev *hr_dev,
123 				struct hns_roce_hem_mhop *mhop,
124 				u32 type)
125 {
126 	struct device *dev = hr_dev->dev;
127 
128 	switch (type) {
129 	case HEM_TYPE_QPC:
130 		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
131 					     + PAGE_SHIFT);
132 		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
133 					     + PAGE_SHIFT);
134 		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
135 		mhop->hop_num = hr_dev->caps.qpc_hop_num;
136 		break;
137 	case HEM_TYPE_MTPT:
138 		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
139 					     + PAGE_SHIFT);
140 		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
141 					     + PAGE_SHIFT);
142 		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
143 		mhop->hop_num = hr_dev->caps.mpt_hop_num;
144 		break;
145 	case HEM_TYPE_CQC:
146 		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
147 					     + PAGE_SHIFT);
148 		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
149 					    + PAGE_SHIFT);
150 		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
151 		mhop->hop_num = hr_dev->caps.cqc_hop_num;
152 		break;
153 	case HEM_TYPE_SCCC:
154 		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
155 					     + PAGE_SHIFT);
156 		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
157 					    + PAGE_SHIFT);
158 		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
159 		mhop->hop_num = hr_dev->caps.sccc_hop_num;
160 		break;
161 	case HEM_TYPE_QPC_TIMER:
162 		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
163 					     + PAGE_SHIFT);
164 		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
165 					    + PAGE_SHIFT);
166 		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
167 		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
168 		break;
169 	case HEM_TYPE_CQC_TIMER:
170 		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
171 					     + PAGE_SHIFT);
172 		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
173 					    + PAGE_SHIFT);
174 		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
175 		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
176 		break;
177 	case HEM_TYPE_SRQC:
178 		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
179 					     + PAGE_SHIFT);
180 		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
181 					     + PAGE_SHIFT);
182 		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
183 		mhop->hop_num = hr_dev->caps.srqc_hop_num;
184 		break;
185 	case HEM_TYPE_GMV:
186 		mhop->buf_chunk_size = 1 << (hr_dev->caps.gmv_buf_pg_sz +
187 					     PAGE_SHIFT);
188 		mhop->bt_chunk_size = 1 << (hr_dev->caps.gmv_ba_pg_sz +
189 					    PAGE_SHIFT);
190 		mhop->ba_l0_num = hr_dev->caps.gmv_bt_num;
191 		mhop->hop_num = hr_dev->caps.gmv_hop_num;
192 		break;
193 	default:
194 		dev_err(dev, "table %u does not support multi-hop addressing!\n",
195 			type);
196 		return -EINVAL;
197 	}
198 
199 	return 0;
200 }
201 
202 int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
203 			   struct hns_roce_hem_table *table, unsigned long *obj,
204 			   struct hns_roce_hem_mhop *mhop)
205 {
206 	struct device *dev = hr_dev->dev;
207 	u32 chunk_ba_num;
208 	u32 chunk_size;
209 	u32 table_idx;
210 	u32 bt_num;
211 
212 	if (get_hem_table_config(hr_dev, mhop, table->type))
213 		return -EINVAL;
214 
215 	if (!obj)
216 		return 0;
217 
218 	/*
219 	 * QPC/MTPT/CQC/SRQC/SCCC allocate HEM for buffer pages.
220 	 * MTT/CQE allocate HEM for BT pages.
221 	 */
222 	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
223 	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
224 	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
225 			      mhop->bt_chunk_size;
226 	table_idx = *obj / (chunk_size / table->obj_size);
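	/*
	 * Example with hypothetical numbers: a 4 KB BT chunk gives
	 * chunk_ba_num = 4096 / 8 = 512.  If chunk_size / obj_size = 8 and
	 * *obj = 10000, then table_idx = 1250; for a 3-level table this
	 * splits into l2_idx = 1250 & 511 = 226, l1_idx = (1250 / 512) & 511
	 * = 2 and l0_idx = 1250 / (512 * 512) = 0.
	 */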
227 	switch (bt_num) {
228 	case 3:
229 		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
230 		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
231 		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
232 		break;
233 	case 2:
234 		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
235 		mhop->l0_idx = table_idx / chunk_ba_num;
236 		break;
237 	case 1:
238 		mhop->l0_idx = table_idx;
239 		break;
240 	default:
241 		dev_err(dev, "table %u does not support hop_num = %u!\n",
242 			table->type, mhop->hop_num);
243 		return -EINVAL;
244 	}
245 	if (mhop->l0_idx >= mhop->ba_l0_num)
246 		mhop->l0_idx %= mhop->ba_l0_num;
247 
248 	return 0;
249 }
250 
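/*
 * Allocate @npages pages of DMA-coherent memory for one HEM, in power-of-two
 * page blocks derived from @hem_alloc_size (and capped by the remaining page
 * count), recording each block in the chunk scatterlist for later mapping
 * and freeing.
 */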
251 static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
252 					       int npages,
253 					       unsigned long hem_alloc_size,
254 					       gfp_t gfp_mask)
255 {
256 	struct hns_roce_hem_chunk *chunk = NULL;
257 	struct hns_roce_hem *hem;
258 	struct scatterlist *mem;
259 	int order;
260 	void *buf;
261 
262 	WARN_ON(gfp_mask & __GFP_HIGHMEM);
263 
264 	hem = kmalloc(sizeof(*hem),
265 		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
266 	if (!hem)
267 		return NULL;
268 
269 	INIT_LIST_HEAD(&hem->chunk_list);
270 
271 	order = get_order(hem_alloc_size);
272 
273 	while (npages > 0) {
274 		if (!chunk) {
275 			chunk = kmalloc(sizeof(*chunk),
276 				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
277 			if (!chunk)
278 				goto fail;
279 
280 			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
281 			chunk->npages = 0;
282 			chunk->nsg = 0;
283 			memset(chunk->buf, 0, sizeof(chunk->buf));
284 			list_add_tail(&chunk->list, &hem->chunk_list);
285 		}
286 
287 		while (1 << order > npages)
288 			--order;
289 
290 		/*
291 		 * Allocate the whole block in one shot. If that fails, don't
292 		 * fall back to smaller allocations; just return failure.
293 		 */
294 		mem = &chunk->mem[chunk->npages];
295 		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
296 				&sg_dma_address(mem), gfp_mask);
297 		if (!buf)
298 			goto fail;
299 
300 		chunk->buf[chunk->npages] = buf;
301 		sg_dma_len(mem) = PAGE_SIZE << order;
302 
303 		++chunk->npages;
304 		++chunk->nsg;
305 		npages -= 1 << order;
306 	}
307 
308 	return hem;
309 
310 fail:
311 	hns_roce_free_hem(hr_dev, hem);
312 	return NULL;
313 }
314 
315 void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
316 {
317 	struct hns_roce_hem_chunk *chunk, *tmp;
318 	int i;
319 
320 	if (!hem)
321 		return;
322 
323 	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
324 		for (i = 0; i < chunk->npages; ++i)
325 			dma_free_coherent(hr_dev->dev,
326 				   sg_dma_len(&chunk->mem[i]),
327 				   chunk->buf[i],
328 				   sg_dma_address(&chunk->mem[i]));
329 		kfree(chunk);
330 	}
331 
332 	kfree(hem);
333 }
334 
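/*
 * Translate @obj into its multi-hop parameters and into the buf/L0/L1 array
 * indexes used by @table, and check that the resulting HEM index does not
 * exceed table->num_hem.
 */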
335 static int calc_hem_config(struct hns_roce_dev *hr_dev,
336 			   struct hns_roce_hem_table *table, unsigned long obj,
337 			   struct hns_roce_hem_mhop *mhop,
338 			   struct hns_roce_hem_index *index)
339 {
340 	struct ib_device *ibdev = &hr_dev->ib_dev;
341 	unsigned long mhop_obj = obj;
342 	u32 l0_idx, l1_idx, l2_idx;
343 	u32 chunk_ba_num;
344 	u32 bt_num;
345 	int ret;
346 
347 	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, mhop);
348 	if (ret)
349 		return ret;
350 
351 	l0_idx = mhop->l0_idx;
352 	l1_idx = mhop->l1_idx;
353 	l2_idx = mhop->l2_idx;
354 	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
355 	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
356 	switch (bt_num) {
357 	case 3:
358 		index->l1 = l0_idx * chunk_ba_num + l1_idx;
359 		index->l0 = l0_idx;
360 		index->buf = l0_idx * chunk_ba_num * chunk_ba_num +
361 			     l1_idx * chunk_ba_num + l2_idx;
362 		break;
363 	case 2:
364 		index->l0 = l0_idx;
365 		index->buf = l0_idx * chunk_ba_num + l1_idx;
366 		break;
367 	case 1:
368 		index->buf = l0_idx;
369 		break;
370 	default:
371 		ibdev_err(ibdev, "table %u does not support mhop.hop_num = %u!\n",
372 			  table->type, mhop->hop_num);
373 		return -EINVAL;
374 	}
375 
376 	if (unlikely(index->buf >= table->num_hem)) {
377 		ibdev_err(ibdev, "table %u exceeds HEM limit, idx %llu, max %lu!\n",
378 			  table->type, index->buf, table->num_hem);
379 		return -EINVAL;
380 	}
381 
382 	return 0;
383 }
384 
385 static void free_mhop_hem(struct hns_roce_dev *hr_dev,
386 			  struct hns_roce_hem_table *table,
387 			  struct hns_roce_hem_mhop *mhop,
388 			  struct hns_roce_hem_index *index)
389 {
390 	u32 bt_size = mhop->bt_chunk_size;
391 	struct device *dev = hr_dev->dev;
392 
393 	if (index->inited & HEM_INDEX_BUF) {
394 		hns_roce_free_hem(hr_dev, table->hem[index->buf]);
395 		table->hem[index->buf] = NULL;
396 	}
397 
398 	if (index->inited & HEM_INDEX_L1) {
399 		dma_free_coherent(dev, bt_size, table->bt_l1[index->l1],
400 				  table->bt_l1_dma_addr[index->l1]);
401 		table->bt_l1[index->l1] = NULL;
402 	}
403 
404 	if (index->inited & HEM_INDEX_L0) {
405 		dma_free_coherent(dev, bt_size, table->bt_l0[index->l0],
406 				  table->bt_l0_dma_addr[index->l0]);
407 		table->bt_l0[index->l0] = NULL;
408 	}
409 }
410 
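/*
 * Allocate the L0/L1 BT chunks that @index requires (if not already present)
 * and the HEM chunk itself, then write each child's DMA address into its
 * parent BT entry.
 */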
411 static int alloc_mhop_hem(struct hns_roce_dev *hr_dev,
412 			  struct hns_roce_hem_table *table,
413 			  struct hns_roce_hem_mhop *mhop,
414 			  struct hns_roce_hem_index *index)
415 {
416 	u32 bt_size = mhop->bt_chunk_size;
417 	struct device *dev = hr_dev->dev;
418 	struct hns_roce_hem_iter iter;
419 	gfp_t flag;
420 	u64 bt_ba;
421 	u32 size;
422 	int ret;
423 
424 	/* alloc L1 BA's chunk */
425 	if ((check_whether_bt_num_3(table->type, mhop->hop_num) ||
426 	     check_whether_bt_num_2(table->type, mhop->hop_num)) &&
427 	     !table->bt_l0[index->l0]) {
428 		table->bt_l0[index->l0] = dma_alloc_coherent(dev, bt_size,
429 					    &table->bt_l0_dma_addr[index->l0],
430 					    GFP_KERNEL);
431 		if (!table->bt_l0[index->l0]) {
432 			ret = -ENOMEM;
433 			goto out;
434 		}
435 		index->inited |= HEM_INDEX_L0;
436 	}
437 
438 	/* alloc L2 BA's chunk */
439 	if (check_whether_bt_num_3(table->type, mhop->hop_num) &&
440 	    !table->bt_l1[index->l1])  {
441 		table->bt_l1[index->l1] = dma_alloc_coherent(dev, bt_size,
442 					    &table->bt_l1_dma_addr[index->l1],
443 					    GFP_KERNEL);
444 		if (!table->bt_l1[index->l1]) {
445 			ret = -ENOMEM;
446 			goto err_alloc_hem;
447 		}
448 		index->inited |= HEM_INDEX_L1;
449 		*(table->bt_l0[index->l0] + mhop->l1_idx) =
450 					       table->bt_l1_dma_addr[index->l1];
451 	}
452 
453 	/*
454 	 * Allocate a buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC,
455 	 * or a BT space chunk for MTT/CQE.
456 	 */
457 	size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size : bt_size;
458 	flag = GFP_KERNEL | __GFP_NOWARN;
459 	table->hem[index->buf] = hns_roce_alloc_hem(hr_dev, size >> PAGE_SHIFT,
460 						    size, flag);
461 	if (!table->hem[index->buf]) {
462 		ret = -ENOMEM;
463 		goto err_alloc_hem;
464 	}
465 
466 	index->inited |= HEM_INDEX_BUF;
467 	hns_roce_hem_first(table->hem[index->buf], &iter);
468 	bt_ba = hns_roce_hem_addr(&iter);
469 	if (table->type < HEM_TYPE_MTT) {
470 		if (mhop->hop_num == 2)
471 			*(table->bt_l1[index->l1] + mhop->l2_idx) = bt_ba;
472 		else if (mhop->hop_num == 1)
473 			*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
474 	} else if (mhop->hop_num == 2) {
475 		*(table->bt_l0[index->l0] + mhop->l1_idx) = bt_ba;
476 	}
477 
478 	return 0;
479 err_alloc_hem:
480 	free_mhop_hem(hr_dev, table, mhop, index);
481 out:
482 	return ret;
483 }
484 
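/*
 * Program the newly allocated BT/HEM addresses into hardware through the
 * set_hem() hook: steps 0 and 1 cover the L0/L1 BTs, and the last step
 * (or step 0 when hop_num is HNS_ROCE_HOP_NUM_0) covers the HEM buffer
 * itself.
 */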
485 static int set_mhop_hem(struct hns_roce_dev *hr_dev,
486 			struct hns_roce_hem_table *table, unsigned long obj,
487 			struct hns_roce_hem_mhop *mhop,
488 			struct hns_roce_hem_index *index)
489 {
490 	struct ib_device *ibdev = &hr_dev->ib_dev;
491 	u32 step_idx;
492 	int ret = 0;
493 
494 	if (index->inited & HEM_INDEX_L0) {
495 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 0);
496 		if (ret) {
497 			ibdev_err(ibdev, "set HEM step 0 failed!\n");
498 			goto out;
499 		}
500 	}
501 
502 	if (index->inited & HEM_INDEX_L1) {
503 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, 1);
504 		if (ret) {
505 			ibdev_err(ibdev, "set HEM step 1 failed!\n");
506 			goto out;
507 		}
508 	}
509 
510 	if (index->inited & HEM_INDEX_BUF) {
511 		if (mhop->hop_num == HNS_ROCE_HOP_NUM_0)
512 			step_idx = 0;
513 		else
514 			step_idx = mhop->hop_num;
515 		ret = hr_dev->hw->set_hem(hr_dev, table, obj, step_idx);
516 		if (ret)
517 			ibdev_err(ibdev, "set HEM step last failed!\n");
518 	}
519 out:
520 	return ret;
521 }
522 
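/*
 * Take a reference on the multi-hop HEM backing @obj, allocating the BT and
 * HEM chunks and programming them into hardware on first use.
 */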
523 static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
524 				   struct hns_roce_hem_table *table,
525 				   unsigned long obj)
526 {
527 	struct ib_device *ibdev = &hr_dev->ib_dev;
528 	struct hns_roce_hem_index index = {};
529 	struct hns_roce_hem_mhop mhop = {};
530 	int ret;
531 
532 	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
533 	if (ret) {
534 		ibdev_err(ibdev, "calc hem config failed!\n");
535 		return ret;
536 	}
537 
538 	mutex_lock(&table->mutex);
539 	if (table->hem[index.buf]) {
540 		refcount_inc(&table->hem[index.buf]->refcount);
541 		goto out;
542 	}
543 
544 	ret = alloc_mhop_hem(hr_dev, table, &mhop, &index);
545 	if (ret) {
546 		ibdev_err(ibdev, "alloc mhop hem failed!\n");
547 		goto out;
548 	}
549 
550 	/* set HEM base address to hardware */
551 	if (table->type < HEM_TYPE_MTT) {
552 		ret = set_mhop_hem(hr_dev, table, obj, &mhop, &index);
553 		if (ret) {
554 			ibdev_err(ibdev, "set HEM address to HW failed!\n");
555 			goto err_alloc;
556 		}
557 	}
558 
559 	refcount_set(&table->hem[index.buf]->refcount, 1);
560 	goto out;
561 
562 err_alloc:
563 	free_mhop_hem(hr_dev, table, &mhop, &index);
564 out:
565 	mutex_unlock(&table->mutex);
566 	return ret;
567 }
568 
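/*
 * Take a reference on the HEM chunk backing @obj, allocating it and setting
 * its base address in hardware on first use; multi-hop tables are handled by
 * hns_roce_table_mhop_get().
 */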
569 int hns_roce_table_get(struct hns_roce_dev *hr_dev,
570 		       struct hns_roce_hem_table *table, unsigned long obj)
571 {
572 	struct device *dev = hr_dev->dev;
573 	unsigned long i;
574 	int ret = 0;
575 
576 	if (hns_roce_check_whether_mhop(hr_dev, table->type))
577 		return hns_roce_table_mhop_get(hr_dev, table, obj);
578 
579 	i = obj / (table->table_chunk_size / table->obj_size);
580 
581 	mutex_lock(&table->mutex);
582 
583 	if (table->hem[i]) {
584 		refcount_inc(&table->hem[i]->refcount);
585 		goto out;
586 	}
587 
588 	table->hem[i] = hns_roce_alloc_hem(hr_dev,
589 				       table->table_chunk_size >> PAGE_SHIFT,
590 				       table->table_chunk_size,
591 				       GFP_KERNEL | __GFP_NOWARN);
592 	if (!table->hem[i]) {
593 		ret = -ENOMEM;
594 		goto out;
595 	}
596 
597 	/* Set HEM base address (128KB/page, PA) to hardware */
598 	if (hr_dev->hw->set_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT)) {
599 		hns_roce_free_hem(hr_dev, table->hem[i]);
600 		table->hem[i] = NULL;
601 		ret = -ENODEV;
602 		dev_err(dev, "set HEM base address to HW failed.\n");
603 		goto out;
604 	}
605 
606 	refcount_set(&table->hem[i]->refcount, 1);
607 out:
608 	mutex_unlock(&table->mutex);
609 	return ret;
610 }
611 
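/*
 * Work out which BT levels become empty once the HEM at @index is released
 * and, for context tables (type < HEM_TYPE_MTT), ask hardware to unmap them
 * through the clear_hem() hook.
 */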
612 static void clear_mhop_hem(struct hns_roce_dev *hr_dev,
613 			   struct hns_roce_hem_table *table, unsigned long obj,
614 			   struct hns_roce_hem_mhop *mhop,
615 			   struct hns_roce_hem_index *index)
616 {
617 	struct ib_device *ibdev = &hr_dev->ib_dev;
618 	u32 hop_num = mhop->hop_num;
619 	u32 chunk_ba_num;
620 	u32 step_idx;
621 
622 	index->inited = HEM_INDEX_BUF;
623 	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
624 	if (check_whether_bt_num_2(table->type, hop_num)) {
625 		if (hns_roce_check_hem_null(table->hem, index->buf,
626 					    chunk_ba_num, table->num_hem))
627 			index->inited |= HEM_INDEX_L0;
628 	} else if (check_whether_bt_num_3(table->type, hop_num)) {
629 		if (hns_roce_check_hem_null(table->hem, index->buf,
630 					    chunk_ba_num, table->num_hem)) {
631 			index->inited |= HEM_INDEX_L1;
632 			if (hns_roce_check_bt_null(table->bt_l1, index->l1,
633 						   chunk_ba_num))
634 				index->inited |= HEM_INDEX_L0;
635 		}
636 	}
637 
638 	if (table->type < HEM_TYPE_MTT) {
639 		if (hop_num == HNS_ROCE_HOP_NUM_0)
640 			step_idx = 0;
641 		else
642 			step_idx = hop_num;
643 
644 		if (hr_dev->hw->clear_hem(hr_dev, table, obj, step_idx))
645 			ibdev_warn(ibdev, "failed to clear hop%u HEM.\n", hop_num);
646 
647 		if (index->inited & HEM_INDEX_L1)
648 			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
649 				ibdev_warn(ibdev, "failed to clear HEM step 1.\n");
650 
651 		if (index->inited & HEM_INDEX_L0)
652 			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
653 				ibdev_warn(ibdev, "failed to clear HEM step 0.\n");
654 	}
655 }
656 
657 static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
658 				    struct hns_roce_hem_table *table,
659 				    unsigned long obj,
660 				    int check_refcount)
661 {
662 	struct ib_device *ibdev = &hr_dev->ib_dev;
663 	struct hns_roce_hem_index index = {};
664 	struct hns_roce_hem_mhop mhop = {};
665 	int ret;
666 
667 	ret = calc_hem_config(hr_dev, table, obj, &mhop, &index);
668 	if (ret) {
669 		ibdev_err(ibdev, "calc hem config failed!\n");
670 		return;
671 	}
672 
673 	if (!check_refcount)
674 		mutex_lock(&table->mutex);
675 	else if (!refcount_dec_and_mutex_lock(&table->hem[index.buf]->refcount,
676 					      &table->mutex))
677 		return;
678 
679 	clear_mhop_hem(hr_dev, table, obj, &mhop, &index);
680 	free_mhop_hem(hr_dev, table, &mhop, &index);
681 
682 	mutex_unlock(&table->mutex);
683 }
684 
685 void hns_roce_table_put(struct hns_roce_dev *hr_dev,
686 			struct hns_roce_hem_table *table, unsigned long obj)
687 {
688 	struct device *dev = hr_dev->dev;
689 	unsigned long i;
690 
691 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
692 		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
693 		return;
694 	}
695 
696 	i = obj / (table->table_chunk_size / table->obj_size);
697 
698 	if (!refcount_dec_and_mutex_lock(&table->hem[i]->refcount,
699 					 &table->mutex))
700 		return;
701 
702 	if (hr_dev->hw->clear_hem(hr_dev, table, obj, HEM_HOP_STEP_DIRECT))
703 		dev_warn(dev, "failed to clear HEM base address.\n");
704 
705 	hns_roce_free_hem(hr_dev, table->hem[i]);
706 	table->hem[i] = NULL;
707 
708 	mutex_unlock(&table->mutex);
709 }
710 
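/*
 * Look up the kernel virtual address of @obj within the table's HEM chunks,
 * optionally returning the matching DMA address through @dma_handle.
 * Returns NULL if the backing HEM has not been allocated.
 */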
711 void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
712 			  struct hns_roce_hem_table *table,
713 			  unsigned long obj, dma_addr_t *dma_handle)
714 {
715 	struct hns_roce_hem_chunk *chunk;
716 	struct hns_roce_hem_mhop mhop;
717 	struct hns_roce_hem *hem;
718 	unsigned long mhop_obj = obj;
719 	unsigned long obj_per_chunk;
720 	unsigned long idx_offset;
721 	int offset, dma_offset;
722 	void *addr = NULL;
723 	u32 hem_idx = 0;
724 	int length;
725 	int i, j;
726 
727 	mutex_lock(&table->mutex);
728 
729 	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
730 		obj_per_chunk = table->table_chunk_size / table->obj_size;
731 		hem = table->hem[obj / obj_per_chunk];
732 		idx_offset = obj % obj_per_chunk;
733 		dma_offset = offset = idx_offset * table->obj_size;
734 	} else {
735 		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */
736 
737 		if (hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop))
738 			goto out;
739 		/* mtt mhop */
740 		i = mhop.l0_idx;
741 		j = mhop.l1_idx;
742 		if (mhop.hop_num == 2)
743 			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
744 		else if (mhop.hop_num == 1 ||
745 			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
746 			hem_idx = i;
747 
748 		hem = table->hem[hem_idx];
749 		dma_offset = offset = obj * seg_size % mhop.bt_chunk_size;
750 		if (mhop.hop_num == 2)
751 			dma_offset = offset = 0;
752 	}
753 
754 	if (!hem)
755 		goto out;
756 
757 	list_for_each_entry(chunk, &hem->chunk_list, list) {
758 		for (i = 0; i < chunk->npages; ++i) {
759 			length = sg_dma_len(&chunk->mem[i]);
760 			if (dma_handle && dma_offset >= 0) {
761 				if (length > (u32)dma_offset)
762 					*dma_handle = sg_dma_address(
763 						&chunk->mem[i]) + dma_offset;
764 				dma_offset -= length;
765 			}
766 
767 			if (length > (u32)offset) {
768 				addr = chunk->buf[i] + offset;
769 				goto out;
770 			}
771 			offset -= length;
772 		}
773 	}
774 
775 out:
776 	mutex_unlock(&table->mutex);
777 	return addr;
778 }
779 
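/*
 * Allocate the bookkeeping arrays (hem[], and bt_l0[]/bt_l1[] plus their DMA
 * address mirrors for multi-hop tables) needed to manage @nobj objects of
 * @obj_size bytes, and initialize the table metadata.
 */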
780 int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
781 			    struct hns_roce_hem_table *table, u32 type,
782 			    unsigned long obj_size, unsigned long nobj)
783 {
784 	unsigned long obj_per_chunk;
785 	unsigned long num_hem;
786 
787 	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
788 		table->table_chunk_size = hr_dev->caps.chunk_sz;
789 		obj_per_chunk = table->table_chunk_size / obj_size;
790 		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
791 
792 		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
793 		if (!table->hem)
794 			return -ENOMEM;
795 	} else {
796 		struct hns_roce_hem_mhop mhop = {};
797 		unsigned long buf_chunk_size;
798 		unsigned long bt_chunk_size;
799 		unsigned long bt_chunk_num;
800 		unsigned long num_bt_l0;
801 		u32 hop_num;
802 
803 		if (get_hem_table_config(hr_dev, &mhop, type))
804 			return -EINVAL;
805 
806 		buf_chunk_size = mhop.buf_chunk_size;
807 		bt_chunk_size = mhop.bt_chunk_size;
808 		num_bt_l0 = mhop.ba_l0_num;
809 		hop_num = mhop.hop_num;
810 
811 		obj_per_chunk = buf_chunk_size / obj_size;
812 		num_hem = DIV_ROUND_UP(nobj, obj_per_chunk);
813 		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
814 
815 		if (type >= HEM_TYPE_MTT)
816 			num_bt_l0 = bt_chunk_num;
817 
818 		table->hem = kcalloc(num_hem, sizeof(*table->hem),
819 					 GFP_KERNEL);
820 		if (!table->hem)
821 			goto err_kcalloc_hem_buf;
822 
823 		if (check_whether_bt_num_3(type, hop_num)) {
824 			unsigned long num_bt_l1;
825 
826 			num_bt_l1 = DIV_ROUND_UP(num_hem, bt_chunk_num);
827 			table->bt_l1 = kcalloc(num_bt_l1,
828 					       sizeof(*table->bt_l1),
829 					       GFP_KERNEL);
830 			if (!table->bt_l1)
831 				goto err_kcalloc_bt_l1;
832 
833 			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
834 						 sizeof(*table->bt_l1_dma_addr),
835 						 GFP_KERNEL);
836 
837 			if (!table->bt_l1_dma_addr)
838 				goto err_kcalloc_l1_dma;
839 		}
840 
841 		if (check_whether_bt_num_2(type, hop_num) ||
842 			check_whether_bt_num_3(type, hop_num)) {
843 			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
844 					       GFP_KERNEL);
845 			if (!table->bt_l0)
846 				goto err_kcalloc_bt_l0;
847 
848 			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
849 						 sizeof(*table->bt_l0_dma_addr),
850 						 GFP_KERNEL);
851 			if (!table->bt_l0_dma_addr)
852 				goto err_kcalloc_l0_dma;
853 		}
854 	}
855 
856 	table->type = type;
857 	table->num_hem = num_hem;
858 	table->obj_size = obj_size;
859 	mutex_init(&table->mutex);
860 
861 	return 0;
862 
863 err_kcalloc_l0_dma:
864 	kfree(table->bt_l0);
865 	table->bt_l0 = NULL;
866 
867 err_kcalloc_bt_l0:
868 	kfree(table->bt_l1_dma_addr);
869 	table->bt_l1_dma_addr = NULL;
870 
871 err_kcalloc_l1_dma:
872 	kfree(table->bt_l1);
873 	table->bt_l1 = NULL;
874 
875 err_kcalloc_bt_l1:
876 	kfree(table->hem);
877 	table->hem = NULL;
878 
879 err_kcalloc_hem_buf:
880 	return -ENOMEM;
881 }
882 
883 static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
884 					    struct hns_roce_hem_table *table)
885 {
886 	struct hns_roce_hem_mhop mhop;
887 	u32 buf_chunk_size;
888 	u64 obj;
889 	int i;
890 
891 	if (hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop))
892 		return;
893 	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
894 					mhop.bt_chunk_size;
895 
896 	for (i = 0; i < table->num_hem; ++i) {
897 		obj = i * buf_chunk_size / table->obj_size;
898 		if (table->hem[i])
899 			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
900 	}
901 
902 	kfree(table->hem);
903 	table->hem = NULL;
904 	kfree(table->bt_l1);
905 	table->bt_l1 = NULL;
906 	kfree(table->bt_l1_dma_addr);
907 	table->bt_l1_dma_addr = NULL;
908 	kfree(table->bt_l0);
909 	table->bt_l0 = NULL;
910 	kfree(table->bt_l0_dma_addr);
911 	table->bt_l0_dma_addr = NULL;
912 }
913 
914 void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
915 				struct hns_roce_hem_table *table)
916 {
917 	struct device *dev = hr_dev->dev;
918 	unsigned long i;
919 
920 	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
921 		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
922 		return;
923 	}
924 
925 	for (i = 0; i < table->num_hem; ++i)
926 		if (table->hem[i]) {
927 			if (hr_dev->hw->clear_hem(hr_dev, table,
928 			    i * table->table_chunk_size / table->obj_size, 0))
929 				dev_err(dev, "clear HEM base address failed.\n");
930 
931 			hns_roce_free_hem(hr_dev, table->hem[i]);
932 		}
933 
934 	kfree(table->hem);
935 }
936 
937 void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
938 {
939 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ)
940 		hns_roce_cleanup_hem_table(hr_dev,
941 					   &hr_dev->srq_table.table);
942 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
943 	if (hr_dev->caps.qpc_timer_entry_sz)
944 		hns_roce_cleanup_hem_table(hr_dev,
945 					   &hr_dev->qpc_timer_table);
946 	if (hr_dev->caps.cqc_timer_entry_sz)
947 		hns_roce_cleanup_hem_table(hr_dev,
948 					   &hr_dev->cqc_timer_table);
949 	if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL)
950 		hns_roce_cleanup_hem_table(hr_dev,
951 					   &hr_dev->qp_table.sccc_table);
952 	if (hr_dev->caps.trrl_entry_sz)
953 		hns_roce_cleanup_hem_table(hr_dev,
954 					   &hr_dev->qp_table.trrl_table);
955 
956 	if (hr_dev->caps.gmv_entry_sz)
957 		hns_roce_cleanup_hem_table(hr_dev, &hr_dev->gmv_table);
958 
959 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
960 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
961 	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
962 }
963 
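/*
 * One BA table (or a fake leaf carved out of the root BT) in a hem_list;
 * 'start' and 'end' are the buffer-page offsets it covers.
 */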
964 struct hns_roce_hem_item {
965 	struct list_head list; /* link all hems in the same bt level */
966 	struct list_head sibling; /* link all hems in last hop for mtt */
967 	void *addr;
968 	dma_addr_t dma_addr;
969 	size_t count; /* max number of BAs */
970 	int start; /* start buf offset in this hem */
971 	int end; /* end buf offset in this hem */
972 };
973 
974 /* All HEM items are linked in a tree structure */
975 struct hns_roce_hem_head {
976 	struct list_head branch[HNS_ROCE_MAX_BT_REGION];
977 	struct list_head root;
978 	struct list_head leaf;
979 };
980 
981 static struct hns_roce_hem_item *
982 hem_list_alloc_item(struct hns_roce_dev *hr_dev, int start, int end, int count,
983 		    bool exist_bt)
984 {
985 	struct hns_roce_hem_item *hem;
986 
987 	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
988 	if (!hem)
989 		return NULL;
990 
991 	if (exist_bt) {
992 		hem->addr = dma_alloc_coherent(hr_dev->dev, count * BA_BYTE_LEN,
993 					       &hem->dma_addr, GFP_KERNEL);
994 		if (!hem->addr) {
995 			kfree(hem);
996 			return NULL;
997 		}
998 	}
999 
1000 	hem->count = count;
1001 	hem->start = start;
1002 	hem->end = end;
1003 	INIT_LIST_HEAD(&hem->list);
1004 	INIT_LIST_HEAD(&hem->sibling);
1005 
1006 	return hem;
1007 }
1008 
1009 static void hem_list_free_item(struct hns_roce_dev *hr_dev,
1010 			       struct hns_roce_hem_item *hem, bool exist_bt)
1011 {
1012 	if (exist_bt)
1013 		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
1014 				  hem->addr, hem->dma_addr);
1015 	kfree(hem);
1016 }
1017 
1018 static void hem_list_free_all(struct hns_roce_dev *hr_dev,
1019 			      struct list_head *head, bool exist_bt)
1020 {
1021 	struct hns_roce_hem_item *hem, *temp_hem;
1022 
1023 	list_for_each_entry_safe(hem, temp_hem, head, list) {
1024 		list_del(&hem->list);
1025 		hem_list_free_item(hr_dev, hem, exist_bt);
1026 	}
1027 }
1028 
1029 static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
1030 			     u64 table_addr)
1031 {
1032 	*(u64 *)(base_addr) = table_addr;
1033 }
1034 
1035 /* assign L0 table address to hem from root bt */
1036 static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
1037 			       struct hns_roce_hem_item *hem, void *cpu_addr,
1038 			       u64 phy_addr)
1039 {
1040 	hem->addr = cpu_addr;
1041 	hem->dma_addr = (dma_addr_t)phy_addr;
1042 }
1043 
1044 static inline bool hem_list_page_is_in_range(struct hns_roce_hem_item *hem,
1045 					     int offset)
1046 {
1047 	return (hem->start <= offset && offset <= hem->end);
1048 }
1049 
1050 static struct hns_roce_hem_item *hem_list_search_item(struct list_head *ba_list,
1051 						      int page_offset)
1052 {
1053 	struct hns_roce_hem_item *hem, *temp_hem;
1054 	struct hns_roce_hem_item *found = NULL;
1055 
1056 	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
1057 		if (hem_list_page_is_in_range(hem, page_offset)) {
1058 			found = hem;
1059 			break;
1060 		}
1061 	}
1062 
1063 	return found;
1064 }
1065 
1066 static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
1067 {
1068 	/*
1069 	 * hopnum    base address table levels
1070 	 * 0		L0(buf)
1071 	 * 1		L0 -> buf
1072 	 * 2		L0 -> L1 -> buf
1073 	 * 3		L0 -> L1 -> L2 -> buf
1074 	 */
1075 	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
1076 }
1077 
1078 /*
1079  * calc the number of buffer-page entries covered by one BT at @bt_level
1080  * @hopnum: number of multi-hop addressing levels
1081  * @bt_level: base address table level
1082  * @unit: ba entries per bt page
1083  */
1084 static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
1085 {
1086 	u32 step;
1087 	int max;
1088 	int i;
1089 
1090 	if (hopnum <= bt_level)
1091 		return 0;
1092 	/*
1093 	 * hopnum  bt_level   range
1094 	 * 1	      0       unit
1095 	 * ------------
1096 	 * 2	      0       unit * unit
1097 	 * 2	      1       unit
1098 	 * ------------
1099 	 * 3	      0       unit * unit * unit
1100 	 * 3	      1       unit * unit
1101 	 * 3	      2       unit
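	 *
	 * e.g. with a 4 KB BT page, unit = 4096 / 8 = 512, so for hopnum = 3
	 * a level-1 BT covers 512 * 512 = 262144 buffer pages.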
1102 	 */
1103 	step = 1;
1104 	max = hopnum - bt_level;
1105 	for (i = 0; i < max; i++)
1106 		step = step * unit;
1107 
1108 	return step;
1109 }
1110 
1111 /*
1112  * calc the number of root BA entries needed to cover all regions
1113  * @regions: buf region array
1114  * @region_cnt: array size of @regions
1115  * @unit: ba entries per bt page
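 *
 * e.g. two regions with hopnum = 2 and counts 1000 and 300, with unit = 512,
 * need DIV_ROUND_UP(1000, 512) + DIV_ROUND_UP(300, 512) = 2 + 1 = 3 entries.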
1116  */
1117 int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
1118 				   int region_cnt, int unit)
1119 {
1120 	const struct hns_roce_buf_region *r;
1121 	int total = 0;
1122 	int step;
1123 	int i;
1124 
1125 	for (i = 0; i < region_cnt; i++) {
1126 		r = &regions[i];
1127 		if (r->hopnum > 1) {
1128 			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1129 			if (step > 0)
1130 				total += (r->count + step - 1) / step;
1131 		} else {
1132 			total += r->count;
1133 		}
1134 	}
1135 
1136 	return total;
1137 }
1138 
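/*
 * Allocate the intermediate BTs (levels 1 .. hopnum - 1) that cover buffer
 * page @offset in region @r, reusing any that already exist in @mid_bt;
 * newly created bottom-level BTs are also added to @btm_bt.
 */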
1139 static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
1140 				 const struct hns_roce_buf_region *r, int unit,
1141 				 int offset, struct list_head *mid_bt,
1142 				 struct list_head *btm_bt)
1143 {
1144 	struct hns_roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
1145 	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
1146 	struct hns_roce_hem_item *cur, *pre;
1147 	const int hopnum = r->hopnum;
1148 	int start_aligned;
1149 	int distance;
1150 	int ret = 0;
1151 	int max_ofs;
1152 	int level;
1153 	u32 step;
1154 	int end;
1155 
1156 	if (hopnum <= 1)
1157 		return 0;
1158 
1159 	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
1160 		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
1161 		return -EINVAL;
1162 	}
1163 
1164 	if (offset < r->offset) {
1165 		dev_err(hr_dev->dev, "invalid offset %d, min %u!\n",
1166 			offset, r->offset);
1167 		return -EINVAL;
1168 	}
1169 
1170 	distance = offset - r->offset;
1171 	max_ofs = r->offset + r->count - 1;
1172 	for (level = 0; level < hopnum; level++)
1173 		INIT_LIST_HEAD(&temp_list[level]);
1174 
1175 	/* config L1 bt to last bt and link them to corresponding parent */
1176 	for (level = 1; level < hopnum; level++) {
1177 		cur = hem_list_search_item(&mid_bt[level], offset);
1178 		if (cur) {
1179 			hem_ptrs[level] = cur;
1180 			continue;
1181 		}
1182 
1183 		step = hem_list_calc_ba_range(hopnum, level, unit);
1184 		if (step < 1) {
1185 			ret = -EINVAL;
1186 			goto err_exit;
1187 		}
1188 
1189 		start_aligned = (distance / step) * step + r->offset;
1190 		end = min_t(int, start_aligned + step - 1, max_ofs);
1191 		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
1192 					  true);
1193 		if (!cur) {
1194 			ret = -ENOMEM;
1195 			goto err_exit;
1196 		}
1197 		hem_ptrs[level] = cur;
1198 		list_add(&cur->list, &temp_list[level]);
1199 		if (hem_list_is_bottom_bt(hopnum, level))
1200 			list_add(&cur->sibling, &temp_list[0]);
1201 
1202 		/* link bt to parent bt */
1203 		if (level > 1) {
1204 			pre = hem_ptrs[level - 1];
1205 			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
1206 			hem_list_link_bt(hr_dev, pre->addr + step,
1207 					 cur->dma_addr);
1208 		}
1209 	}
1210 
1211 	list_splice(&temp_list[0], btm_bt);
1212 	for (level = 1; level < hopnum; level++)
1213 		list_splice(&temp_list[level], &mid_bt[level]);
1214 
1215 	return 0;
1216 
1217 err_exit:
1218 	for (level = 1; level < hopnum; level++)
1219 		hem_list_free_all(hr_dev, &temp_list[level], true);
1220 
1221 	return ret;
1222 }
1223 
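/*
 * Allocate the single root BT covering all regions; it must be able to hold
 * one BA entry for each of its direct children across all regions, and
 * -ENOBUFS is returned if more than @unit entries would be required.
 */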
1224 static struct hns_roce_hem_item *
1225 alloc_root_hem(struct hns_roce_dev *hr_dev, int unit, int *max_ba_num,
1226 	       const struct hns_roce_buf_region *regions, int region_cnt)
1227 {
1228 	const struct hns_roce_buf_region *r;
1229 	struct hns_roce_hem_item *hem;
1230 	int ba_num;
1231 	int offset;
1232 
1233 	ba_num = hns_roce_hem_list_calc_root_ba(regions, region_cnt, unit);
1234 	if (ba_num < 1)
1235 		return ERR_PTR(-ENOMEM);
1236 
1237 	if (ba_num > unit)
1238 		return ERR_PTR(-ENOBUFS);
1239 
1240 	offset = regions[0].offset;
1241 	/* point to the last region */
1242 	r = &regions[region_cnt - 1];
1243 	hem = hem_list_alloc_item(hr_dev, offset, r->offset + r->count - 1,
1244 				  ba_num, true);
1245 	if (!hem)
1246 		return ERR_PTR(-ENOMEM);
1247 
1248 	*max_ba_num = ba_num;
1249 
1250 	return hem;
1251 }
1252 
1253 static int alloc_fake_root_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1254 			      u64 phy_base, const struct hns_roce_buf_region *r,
1255 			      struct list_head *branch_head,
1256 			      struct list_head *leaf_head)
1257 {
1258 	struct hns_roce_hem_item *hem;
1259 
1260 	hem = hem_list_alloc_item(hr_dev, r->offset, r->offset + r->count - 1,
1261 				  r->count, false);
1262 	if (!hem)
1263 		return -ENOMEM;
1264 
1265 	hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
1266 	list_add(&hem->list, branch_head);
1267 	list_add(&hem->sibling, leaf_head);
1268 
1269 	return r->count;
1270 }
1271 
1272 static int setup_middle_bt(struct hns_roce_dev *hr_dev, void *cpu_base,
1273 			   int unit, const struct hns_roce_buf_region *r,
1274 			   const struct list_head *branch_head)
1275 {
1276 	struct hns_roce_hem_item *hem, *temp_hem;
1277 	int total = 0;
1278 	int offset;
1279 	int step;
1280 
1281 	step = hem_list_calc_ba_range(r->hopnum, 1, unit);
1282 	if (step < 1)
1283 		return -EINVAL;
1284 
1285 	/* if mid BTs exist, link each L1 BT to its slot in the L0 BT */
1286 	list_for_each_entry_safe(hem, temp_hem, branch_head, list) {
1287 		offset = (hem->start - r->offset) / step * BA_BYTE_LEN;
1288 		hem_list_link_bt(hr_dev, cpu_base + offset, hem->dma_addr);
1289 		total++;
1290 	}
1291 
1292 	return total;
1293 }
1294 
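/*
 * Populate the root BT: for each region either carve a fake leaf hem out of
 * the root BT (hopnum 0 or 1) or link the region's L1 BTs into the root BT
 * entries (hopnum >= 2), then splice everything into @hem_list.
 */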
1295 static int
1296 setup_root_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem_list *hem_list,
1297 	       int unit, int max_ba_num, struct hns_roce_hem_head *head,
1298 	       const struct hns_roce_buf_region *regions, int region_cnt)
1299 {
1300 	const struct hns_roce_buf_region *r;
1301 	struct hns_roce_hem_item *root_hem;
1302 	void *cpu_base;
1303 	u64 phy_base;
1304 	int i, total;
1305 	int ret;
1306 
1307 	root_hem = list_first_entry(&head->root,
1308 				    struct hns_roce_hem_item, list);
1309 	if (!root_hem)
1310 		return -ENOMEM;
1311 
1312 	total = 0;
1313 	for (i = 0; i < region_cnt && total < max_ba_num; i++) {
1314 		r = &regions[i];
1315 		if (!r->count)
1316 			continue;
1317 
1318 		/* all regions' mid[x][0] share the root_bt's trunk */
1319 		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
1320 		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;
1321 
1322 		/* if hopnum is 0 or 1, cut a new fake hem from the root bt,
1323 		 * whose address is shared with all regions.
1324 		 */
1325 		if (hem_list_is_bottom_bt(r->hopnum, 0))
1326 			ret = alloc_fake_root_bt(hr_dev, cpu_base, phy_base, r,
1327 						 &head->branch[i], &head->leaf);
1328 		else
1329 			ret = setup_middle_bt(hr_dev, cpu_base, unit, r,
1330 					      &hem_list->mid_bt[i][1]);
1331 
1332 		if (ret < 0)
1333 			return ret;
1334 
1335 		total += ret;
1336 	}
1337 
1338 	list_splice(&head->leaf, &hem_list->btm_bt);
1339 	list_splice(&head->root, &hem_list->root_bt);
1340 	for (i = 0; i < region_cnt; i++)
1341 		list_splice(&head->branch[i], &hem_list->mid_bt[i][0]);
1342 
1343 	return 0;
1344 }
1345 
1346 static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
1347 				  struct hns_roce_hem_list *hem_list, int unit,
1348 				  const struct hns_roce_buf_region *regions,
1349 				  int region_cnt)
1350 {
1351 	struct hns_roce_hem_item *root_hem;
1352 	struct hns_roce_hem_head head;
1353 	int max_ba_num;
1354 	int ret;
1355 	int i;
1356 
1357 	root_hem = hem_list_search_item(&hem_list->root_bt, regions[0].offset);
1358 	if (root_hem)
1359 		return 0;
1360 
1361 	max_ba_num = 0;
1362 	root_hem = alloc_root_hem(hr_dev, unit, &max_ba_num, regions,
1363 				  region_cnt);
1364 	if (IS_ERR(root_hem))
1365 		return PTR_ERR(root_hem);
1366 
1367 	/* List head for storing all allocated HEM items */
1368 	INIT_LIST_HEAD(&head.root);
1369 	INIT_LIST_HEAD(&head.leaf);
1370 	for (i = 0; i < region_cnt; i++)
1371 		INIT_LIST_HEAD(&head.branch[i]);
1372 
1373 	hem_list->root_ba = root_hem->dma_addr;
1374 	list_add(&root_hem->list, &head.root);
1375 	ret = setup_root_hem(hr_dev, hem_list, unit, max_ba_num, &head, regions,
1376 			     region_cnt);
1377 	if (ret) {
1378 		for (i = 0; i < region_cnt; i++)
1379 			hem_list_free_all(hr_dev, &head.branch[i], false);
1380 
1381 		hem_list_free_all(hr_dev, &head.root, true);
1382 	}
1383 
1384 	return ret;
1385 }
1386 
1387 /* construct the base address table and link them by address hop config */
1388 int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
1389 			      struct hns_roce_hem_list *hem_list,
1390 			      const struct hns_roce_buf_region *regions,
1391 			      int region_cnt, unsigned int bt_pg_shift)
1392 {
1393 	const struct hns_roce_buf_region *r;
1394 	int ofs, end;
1395 	int unit;
1396 	int ret;
1397 	int i;
1398 
1399 	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
1400 		dev_err(hr_dev->dev, "invalid region count %d!\n",
1401 			region_cnt);
1402 		return -EINVAL;
1403 	}
1404 
1405 	unit = (1 << bt_pg_shift) / BA_BYTE_LEN;
1406 	for (i = 0; i < region_cnt; i++) {
1407 		r = &regions[i];
1408 		if (!r->count)
1409 			continue;
1410 
1411 		end = r->offset + r->count;
1412 		for (ofs = r->offset; ofs < end; ofs += unit) {
1413 			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
1414 						    hem_list->mid_bt[i],
1415 						    &hem_list->btm_bt);
1416 			if (ret) {
1417 				dev_err(hr_dev->dev,
1418 					"failed to alloc hem trunk, ret = %d!\n", ret);
1419 				goto err_alloc;
1420 			}
1421 		}
1422 	}
1423 
1424 	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
1425 				     region_cnt);
1426 	if (ret)
1427 		dev_err(hr_dev->dev, "failed to alloc hem root, ret = %d!\n", ret);
1428 	else
1429 		return 0;
1430 
1431 err_alloc:
1432 	hns_roce_hem_list_release(hr_dev, hem_list);
1433 
1434 	return ret;
1435 }
1436 
1437 void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
1438 			       struct hns_roce_hem_list *hem_list)
1439 {
1440 	int i, j;
1441 
1442 	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1443 		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1444 			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
1445 					  j != 0);
1446 
1447 	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
1448 	INIT_LIST_HEAD(&hem_list->btm_bt);
1449 	hem_list->root_ba = 0;
1450 }
1451 
1452 void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list)
1453 {
1454 	int i, j;
1455 
1456 	INIT_LIST_HEAD(&hem_list->root_bt);
1457 	INIT_LIST_HEAD(&hem_list->btm_bt);
1458 	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
1459 		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
1460 			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);
1461 }
1462 
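/*
 * Return the kernel address of the BA entry for buffer page @offset in the
 * bottom-level BT list, and via @mtt_cnt how many consecutive entries remain
 * in that BT starting from @offset.
 */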
1463 void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
1464 				 struct hns_roce_hem_list *hem_list,
1465 				 int offset, int *mtt_cnt)
1466 {
1467 	struct list_head *head = &hem_list->btm_bt;
1468 	struct hns_roce_hem_item *hem, *temp_hem;
1469 	void *cpu_base = NULL;
1470 	int nr = 0;
1471 
1472 	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
1473 		if (hem_list_page_is_in_range(hem, offset)) {
1474 			nr = offset - hem->start;
1475 			cpu_base = hem->addr + nr * BA_BYTE_LEN;
1476 			nr = hem->end + 1 - offset;
1477 			break;
1478 		}
1479 	}
1480 
1481 	if (mtt_cnt)
1482 		*mtt_cnt = nr;
1483 
1484 	return cpu_base;
1485 }
1486