/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define DMA_ADDR_T_SHIFT		12
#define BT_BA_SHIFT			32

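/*
 * Multi-hop addressing keeps large context tables (QPC, MTPT, CQC, ...) in
 * discontiguous chunks that hardware reaches through one to three levels of
 * base address tables (BTs).  A non-zero *_hop_num capability enables
 * multi-hop addressing for the corresponding table type;
 * hns_roce_check_whether_mhop() below encodes that mapping.
 */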
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
	    (hr_dev->caps.sccc_hop_num && type == HEM_TYPE_SCCC) ||
	    (hr_dev->caps.qpc_timer_hop_num && type == HEM_TYPE_QPC_TIMER) ||
	    (hr_dev->caps.cqc_timer_hop_num && type == HEM_TYPE_CQC_TIMER) ||
	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
	    (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
	    (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
		return true;

	return false;
}

static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
				    u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (hem[start_idx + i])
			return false;

	return true;
}

static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (bt[start_idx + i])
			return false;

	return true;
}

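/*
 * Returns the number of addressing levels used below: 3 means
 * L0 BT -> L1 BT -> bottom chunk, 2 means L0 BT -> bottom chunk, and 1 means
 * the bottom chunk is reached directly.  The mapping from (table_type,
 * hop_num) to a level count lives in the check_whether_bt_num_*() helpers.
 */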
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}

int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 table_idx;
	u32 bt_num;
	u32 chunk_size;

	switch (table->type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SCCC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.sccc_bt_num;
		mhop->hop_num = hr_dev->caps.sccc_hop_num;
		break;
	case HEM_TYPE_QPC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_timer_hop_num;
		break;
	case HEM_TYPE_CQC_TIMER:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_timer_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_timer_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_MTT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.mtt_hop_num;
		break;
	case HEM_TYPE_CQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.cqe_hop_num;
		break;
	case HEM_TYPE_SRQWQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
		break;
	case HEM_TYPE_IDX:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / BA_BYTE_LEN;
		mhop->hop_num = hr_dev->caps.idx_hop_num;
		break;
	default:
		dev_err(dev, "Table %d does not support multi-hop addressing!\n",
			table->type);
		return -EINVAL;
	}

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC/SCCC alloc hem for buffer pages.
	 * MTT/CQE alloc hem for bt pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / BA_BYTE_LEN;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
		     (chunk_size / table->obj_size);
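	/*
	 * Worked example (assuming a 4 KB BT chunk and 8-byte BAs):
	 * chunk_ba_num = 4096 / 8 = 512, so a 3-level table decomposes
	 * table_idx base-512:
	 *   l0_idx = table_idx / (512 * 512)
	 *   l1_idx = (table_idx / 512) % 512
	 *   l2_idx = table_idx % 512
	 */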
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = (table_idx / chunk_ba_num) & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}

static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

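		/* scale the allocation order down so a single allocation
		 * never exceeds the number of pages still needed
		 */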
		while (1 << order > npages)
			--order;

		/*
		 * Allocate in one shot: if this allocation fails, do not
		 * fall back to smaller blocks, just return failure.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
				&sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
				   sg_dma_len(&chunk->mem[i]),
				   chunk->buf[i],
				   sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}

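/*
 * Program one HEM chunk's base address into the hw_v1 BT command registers:
 * poll the hw_sync bit in ROCEE_BT_CMD_H_REG until hardware is idle, then
 * write the {low 32 BA bits, high BA bits + command} pair to
 * ROCEE_BT_CMD_L_REG in one 64-bit store.
 */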
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	unsigned long flags;
	u32 bt_cmd_h_val = 0;
	u32 bt_cmd_val[2];
	u32 bt_cmd_l = 0;
	u64 bt_ba = 0;
	long end;
	int ret = 0;

	/* Find the HEM (Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		break;
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_SRQC);
		break;
	default:
		return ret;
	}
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate over only one chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = HW_SYNC_TIMEOUT_MSECS;
		while (end > 0) {
			if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT))
				break;

			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
			end -= HW_SYNC_SLEEP_TIME_INTERVAL;
		}

		if (end <= 0) {
			dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
			spin_unlock_irqrestore(lock, flags);
			return -EBUSY;
		}

		bt_cmd_l = (u32)bt_ba;
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h_val;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}

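/*
 * Look up (and allocate on first use) the HEM chunk covering @obj in a
 * multi-hop table: allocate any missing L0/L1 BT levels, fill the parent
 * entry with the child's DMA address and, for context tables, tell hardware
 * about each new level via hw->set_hem().
 */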
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem_iter iter;
	u32 buf_chunk_size;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 size;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	u64 bt_l0_idx = 0;
	u64 bt_ba;
	unsigned long mhop_obj = obj;
	int bt_l1_allocated = 0;
	int bt_l0_allocated = 0;
	int step_idx;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return ret;

	buf_chunk_size = mhop.buf_chunk_size;
	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return -EINVAL;
	}

	mutex_lock(&table->mutex);

	if (table->hem[hem_idx]) {
		++table->hem[hem_idx]->refcount;
		goto out;
	}

	/* alloc the chunk holding L1 BAs (the L0 BT) */
	if ((check_whether_bt_num_3(table->type, hop_num) ||
	     check_whether_bt_num_2(table->type, hop_num)) &&
	    !table->bt_l0[bt_l0_idx]) {
		table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l0_dma_addr[bt_l0_idx]),
					    GFP_KERNEL);
		if (!table->bt_l0[bt_l0_idx]) {
			ret = -ENOMEM;
			goto out;
		}
		bt_l0_allocated = 1;

		/* set base address to hardware */
		if (table->type < HEM_TYPE_MTT) {
			step_idx = 0;
			if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
				ret = -ENODEV;
				dev_err(dev, "set HEM base address to HW failed!\n");
				goto err_dma_alloc_l1;
			}
		}
	}

	/* alloc the chunk holding L2 BAs (the L1 BT) */
	if (check_whether_bt_num_3(table->type, hop_num) &&
	    !table->bt_l1[bt_l1_idx]) {
		table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l1_dma_addr[bt_l1_idx]),
					    GFP_KERNEL);
		if (!table->bt_l1[bt_l1_idx]) {
			ret = -ENOMEM;
			goto err_dma_alloc_l1;
		}
		bt_l1_allocated = 1;
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
					       table->bt_l1_dma_addr[bt_l1_idx];

		/* set base address to hardware */
		step_idx = 1;
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_alloc_hem_buf;
		}
	}

	/*
	 * alloc buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
	 * alloc bt space chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
	table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
						size >> PAGE_SHIFT,
						size,
						(table->lowmem ? GFP_KERNEL :
						GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[hem_idx]) {
		ret = -ENOMEM;
		goto err_alloc_hem_buf;
	}

	hns_roce_hem_first(table->hem[hem_idx], &iter);
	bt_ba = hns_roce_hem_addr(&iter);

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == 2) {
			*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
			step_idx = 2;
		} else if (hop_num == 1) {
			*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
			step_idx = 1;
		} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
			step_idx = 0;
		} else {
			ret = -EINVAL;
			goto err_free_hem;
		}

		/* set HEM base address to hardware */
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_free_hem;
		}
	} else if (hop_num == 2) {
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
	}

	++table->hem[hem_idx]->refcount;
	goto out;

err_free_hem:
	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
	table->hem[hem_idx] = NULL;

err_alloc_hem_buf:
	if (bt_l1_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
				  table->bt_l1_dma_addr[bt_l1_idx]);
		table->bt_l1[bt_l1_idx] = NULL;
	}

err_dma_alloc_l1:
	if (bt_l0_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
				  table->bt_l0_dma_addr[bt_l0_idx]);
		table->bt_l0[bt_l0_idx] = NULL;
	}

out:
	mutex_unlock(&table->mutex);
	return ret;
}

int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	int ret = 0;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set HEM base address (128K/page, physical address) to hardware */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}

static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	unsigned long mhop_obj = obj;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 start_idx;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return;

	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / BA_BYTE_LEN;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return;
	}

	mutex_lock(&table->mutex);

	if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
		mutex_unlock(&table->mutex);
		return;
	}

	if (table->type < HEM_TYPE_MTT && hop_num == 1) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT &&
		   hop_num == HNS_ROCE_HOP_NUM_0) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");
	}

	/*
	 * free buffer space chunk for QPC/MTPT/CQC/SRQC/SCCC.
	 * free bt space chunk for MTT/CQE.
	 */
	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
	table->hem[hem_idx] = NULL;

	if (check_whether_bt_num_2(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (table->type < HEM_TYPE_MTT &&
			    hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l0[mhop.l0_idx],
					  table->bt_l0_dma_addr[mhop.l0_idx]);
			table->bt_l0[mhop.l0_idx] = NULL;
		}
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			    mhop.l1_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l1[bt_l1_idx],
					  table->bt_l1_dma_addr[bt_l1_idx]);
			table->bt_l1[bt_l1_idx] = NULL;

			start_idx = mhop.l0_idx * chunk_ba_num;
			if (hns_roce_check_bt_null(table->bt_l1, start_idx,
						   chunk_ba_num)) {
				if (hr_dev->hw->clear_hem(hr_dev, table, obj,
							  0))
					dev_warn(dev, "Clear HEM base address failed.\n");

				dma_free_coherent(dev, bt_chunk_size,
					    table->bt_l0[mhop.l0_idx],
					    table->bt_l0_dma_addr[mhop.l0_idx]);
				table->bt_l0[mhop.l0_idx] = NULL;
			}
		}
	}

	mutex_unlock(&table->mutex);
}

void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = (obj & (table->num_obj - 1)) /
	    (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

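/*
 * Translate an object index into addresses: returns the kernel virtual
 * address of @obj inside its HEM chunk and, if @dma_handle is set, the
 * matching DMA address.  Only valid for lowmem tables, since highmem chunks
 * have no permanent kernel mapping.
 */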
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	void *addr = NULL;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	int length;
	int i, j;
	u32 hem_idx = 0;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		u32 seg_size = 64; /* 8 bytes per BA and 8 BA per segment */

		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
		/* mtt mhop */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / BA_BYTE_LEN) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = (obj & (table->num_obj - 1)) * seg_size %
				       mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

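	/* walk the scatterlist until the remaining offset falls inside one of
	 * its pages
	 */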
	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}

int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;
	int ret;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	/* Allocate MTT entry memory according to chunk (128KB) */
	for (i = start; i <= end; i += inc) {
		ret = hns_roce_table_get(hr_dev, table, i);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		hns_roce_table_put(hr_dev, table, i);
	}
	return ret;
}

void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	for (i = start; i <= end; i += inc)
		hns_roce_table_put(hr_dev, table, i);
}

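/*
 * Set up a HEM table: for single-hop tables only the hem[] array of chunk
 * pointers is needed; for multi-hop tables the bt_l0/bt_l1 arrays (and their
 * DMA-address shadows) are sized from the per-type chunk and hop
 * capabilities as well.
 */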
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	struct device *dev = hr_dev->dev;
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0 = 0;
		u32 hop_num;

		switch (type) {
		case HEM_TYPE_QPC:
			buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.qpc_bt_num;
			hop_num = hr_dev->caps.qpc_hop_num;
			break;
		case HEM_TYPE_MTPT:
			buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.mpt_bt_num;
			hop_num = hr_dev->caps.mpt_hop_num;
			break;
		case HEM_TYPE_CQC:
			buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.cqc_bt_num;
			hop_num = hr_dev->caps.cqc_hop_num;
			break;
		case HEM_TYPE_SCCC:
			buf_chunk_size = 1 << (hr_dev->caps.sccc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.sccc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.sccc_bt_num;
			hop_num = hr_dev->caps.sccc_hop_num;
			break;
		case HEM_TYPE_QPC_TIMER:
			buf_chunk_size = 1 << (hr_dev->caps.qpc_timer_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.qpc_timer_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.qpc_timer_bt_num;
			hop_num = hr_dev->caps.qpc_timer_hop_num;
			break;
		case HEM_TYPE_CQC_TIMER:
			buf_chunk_size = 1 << (hr_dev->caps.cqc_timer_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.cqc_timer_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.cqc_timer_bt_num;
			hop_num = hr_dev->caps.cqc_timer_hop_num;
			break;
		case HEM_TYPE_SRQC:
			buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.srqc_bt_num;
			hop_num = hr_dev->caps.srqc_hop_num;
			break;
		case HEM_TYPE_MTT:
			buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.mtt_hop_num;
			break;
		case HEM_TYPE_CQE:
			buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.cqe_hop_num;
			break;
		case HEM_TYPE_SRQWQE:
			buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.srqwqe_hop_num;
			break;
		case HEM_TYPE_IDX:
			buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.idx_hop_num;
			break;
		default:
			dev_err(dev,
				"Table %d does not support HEM table init here!\n",
				type);
			return -EINVAL;
		}
		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / BA_BYTE_LEN;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
					     bt_chunk_num;
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						 sizeof(*table->bt_l1_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						 sizeof(*table->bt_l0_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}

static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	int i;
	u64 obj;

	hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
					mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}

void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hr_dev->hw->clear_hem(hr_dev, table,
			    i * table->table_chunk_size / table->obj_size, 0))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}

void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.num_idx_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_idx_table);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_srqwqe_table);
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.qpc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qpc_timer_table);
	if (hr_dev->caps.cqc_timer_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->cqc_timer_table);
	if (hr_dev->caps.sccc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.sccc_table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}

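/*
 * One BT page (or, when exist_bt is false, a fake item aliasing part of the
 * root BT).  Items are linked per level through @list; bottom-level items
 * are additionally chained through @sibling so the MTT walker can scan them
 * in buffer order.
 */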
struct roce_hem_item {
	struct list_head list; /* link all hems in the same bt level */
	struct list_head sibling; /* link all hems in last hop for mtt */
	void *addr;
	dma_addr_t dma_addr;
	size_t count; /* max ba numbers */
	int start; /* start buf offset in this hem */
	int end; /* end buf offset in this hem */
};

static struct roce_hem_item *hem_list_alloc_item(struct hns_roce_dev *hr_dev,
						 int start, int end,
						 int count, bool exist_bt,
						 int bt_level)
{
	struct roce_hem_item *hem;

	hem = kzalloc(sizeof(*hem), GFP_KERNEL);
	if (!hem)
		return NULL;

	if (exist_bt) {
		hem->addr = dma_alloc_coherent(hr_dev->dev,
					       count * BA_BYTE_LEN,
					       &hem->dma_addr, GFP_KERNEL);
		if (!hem->addr) {
			kfree(hem);
			return NULL;
		}
	}

	hem->count = count;
	hem->start = start;
	hem->end = end;
	INIT_LIST_HEAD(&hem->list);
	INIT_LIST_HEAD(&hem->sibling);

	return hem;
}

static void hem_list_free_item(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, bool exist_bt)
{
	if (exist_bt)
		dma_free_coherent(hr_dev->dev, hem->count * BA_BYTE_LEN,
				  hem->addr, hem->dma_addr);
	kfree(hem);
}

static void hem_list_free_all(struct hns_roce_dev *hr_dev,
			      struct list_head *head, bool exist_bt)
{
	struct roce_hem_item *hem, *temp_hem;

	list_for_each_entry_safe(hem, temp_hem, head, list) {
		list_del(&hem->list);
		hem_list_free_item(hr_dev, hem, exist_bt);
	}
}

static void hem_list_link_bt(struct hns_roce_dev *hr_dev, void *base_addr,
			     u64 table_addr)
{
	*(u64 *)(base_addr) = table_addr;
}

/* assign L0 table address to hem from root bt */
static void hem_list_assign_bt(struct hns_roce_dev *hr_dev,
			       struct roce_hem_item *hem, void *cpu_addr,
			       u64 phy_addr)
{
	hem->addr = cpu_addr;
	hem->dma_addr = (dma_addr_t)phy_addr;
}

static inline bool hem_list_page_is_in_range(struct roce_hem_item *hem,
					     int offset)
{
	return (hem->start <= offset && offset <= hem->end);
}

static struct roce_hem_item *hem_list_search_item(struct list_head *ba_list,
						  int page_offset)
{
	struct roce_hem_item *hem, *temp_hem;
	struct roce_hem_item *found = NULL;

	list_for_each_entry_safe(hem, temp_hem, ba_list, list) {
		if (hem_list_page_is_in_range(hem, page_offset)) {
			found = hem;
			break;
		}
	}

	return found;
}

static bool hem_list_is_bottom_bt(int hopnum, int bt_level)
{
	/*
	 * hopnum    base address table levels
	 * 0		L0(buf)
	 * 1		L0 -> buf
	 * 2		L0 -> L1 -> buf
	 * 3		L0 -> L1 -> L2 -> buf
	 */
	return bt_level >= (hopnum ? hopnum - 1 : hopnum);
}

/**
 * hem_list_calc_ba_range - calculate the number of base address entries
 * @hopnum: number of multihop addressing levels
 * @bt_level: base address table level
 * @unit: BA entries per BT page
 */
static u32 hem_list_calc_ba_range(int hopnum, int bt_level, int unit)
{
	u32 step;
	int max;
	int i;

	if (hopnum <= bt_level)
		return 0;
	/*
	 * hopnum  bt_level   range
	 * 1	      0       unit
	 * ------------
	 * 2	      0       unit * unit
	 * 2	      1       unit
	 * ------------
	 * 3	      0       unit * unit * unit
	 * 3	      1       unit * unit
	 * 3	      2       unit
	 */
	step = 1;
	max = hopnum - bt_level;
	for (i = 0; i < max; i++)
		step = step * unit;

	return step;
}

/**
 * hns_roce_hem_list_calc_root_ba - calculate the root BA entries which
 *                                  could cover all regions
 * @regions: buf region array
 * @region_cnt: array size of @regions
 * @unit: BA entries per BT page
 */
int hns_roce_hem_list_calc_root_ba(const struct hns_roce_buf_region *regions,
				   int region_cnt, int unit)
{
	struct hns_roce_buf_region *r;
	int total = 0;
	int step;
	int i;

	for (i = 0; i < region_cnt; i++) {
		r = (struct hns_roce_buf_region *)&regions[i];
		if (r->hopnum > 1) {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step > 0)
				total += (r->count + step - 1) / step;
		} else {
			total += r->count;
		}
	}

	return total;
}

static int hem_list_alloc_mid_bt(struct hns_roce_dev *hr_dev,
				 const struct hns_roce_buf_region *r, int unit,
				 int offset, struct list_head *mid_bt,
				 struct list_head *btm_bt)
{
	struct roce_hem_item *hem_ptrs[HNS_ROCE_MAX_BT_LEVEL] = { NULL };
	struct list_head temp_list[HNS_ROCE_MAX_BT_LEVEL];
	struct roce_hem_item *cur, *pre;
	const int hopnum = r->hopnum;
	int start_aligned;
	int distance;
	int ret = 0;
	int max_ofs;
	int level;
	u32 step;
	int end;

	if (hopnum <= 1)
		return 0;

	if (hopnum > HNS_ROCE_MAX_BT_LEVEL) {
		dev_err(hr_dev->dev, "invalid hopnum %d!\n", hopnum);
		return -EINVAL;
	}

	if (offset < r->offset) {
		dev_err(hr_dev->dev, "invalid offset %d, min %d!\n",
			offset, r->offset);
		return -EINVAL;
	}

	distance = offset - r->offset;
	max_ofs = r->offset + r->count - 1;
	for (level = 0; level < hopnum; level++)
		INIT_LIST_HEAD(&temp_list[level]);

	/* config L1 bt to last bt and link them to corresponding parent */
	for (level = 1; level < hopnum; level++) {
		cur = hem_list_search_item(&mid_bt[level], offset);
		if (cur) {
			hem_ptrs[level] = cur;
			continue;
		}

		step = hem_list_calc_ba_range(hopnum, level, unit);
		if (step < 1) {
			ret = -EINVAL;
			goto err_exit;
		}

		start_aligned = (distance / step) * step + r->offset;
		end = min_t(int, start_aligned + step - 1, max_ofs);
		cur = hem_list_alloc_item(hr_dev, start_aligned, end, unit,
					  true, level);
		if (!cur) {
			ret = -ENOMEM;
			goto err_exit;
		}
		hem_ptrs[level] = cur;
		list_add(&cur->list, &temp_list[level]);
		if (hem_list_is_bottom_bt(hopnum, level))
			list_add(&cur->sibling, &temp_list[0]);

		/* link bt to parent bt */
		if (level > 1) {
			pre = hem_ptrs[level - 1];
			step = (cur->start - pre->start) / step * BA_BYTE_LEN;
			hem_list_link_bt(hr_dev, pre->addr + step,
					 cur->dma_addr);
		}
	}

	list_splice(&temp_list[0], btm_bt);
	for (level = 1; level < hopnum; level++)
		list_splice(&temp_list[level], &mid_bt[level]);

	return 0;

err_exit:
	for (level = 1; level < hopnum; level++)
		hem_list_free_all(hr_dev, &temp_list[level], true);

	return ret;
}

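/*
 * Build the single root BT that fronts all regions: a region with hopnum 0
 * or 1 consumes r->count root entries directly (a fake item is carved out of
 * the root trunk for it), while a deeper region consumes one root entry per
 * existing L1 BT.
 */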
static int hem_list_alloc_root_bt(struct hns_roce_dev *hr_dev,
				  struct hns_roce_hem_list *hem_list, int unit,
				  const struct hns_roce_buf_region *regions,
				  int region_cnt)
{
	struct roce_hem_item *hem, *temp_hem, *root_hem;
	struct list_head temp_list[HNS_ROCE_MAX_BT_REGION];
	const struct hns_roce_buf_region *r;
	struct list_head temp_root;
	struct list_head temp_btm;
	void *cpu_base;
	u64 phy_base;
	int ret = 0;
	int offset;
	int total;
	int step;
	int i;

	r = &regions[0];
	root_hem = hem_list_search_item(&hem_list->root_bt, r->offset);
	if (root_hem)
		return 0;

	INIT_LIST_HEAD(&temp_root);
	total = r->offset;
	/* point to the last region */
	r = &regions[region_cnt - 1];
	root_hem = hem_list_alloc_item(hr_dev, total, r->offset + r->count - 1,
				       unit, true, 0);
	if (!root_hem)
		return -ENOMEM;
	list_add(&root_hem->list, &temp_root);

	hem_list->root_ba = root_hem->dma_addr;

	INIT_LIST_HEAD(&temp_btm);
	for (i = 0; i < region_cnt; i++)
		INIT_LIST_HEAD(&temp_list[i]);

	total = 0;
	for (i = 0; i < region_cnt && total < unit; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		/* all regions' mid[x][0] share the root_bt's trunk */
		cpu_base = root_hem->addr + total * BA_BYTE_LEN;
		phy_base = root_hem->dma_addr + total * BA_BYTE_LEN;

		/* if hopnum is 0 or 1, cut a new fake hem from the root bt
		 * whose address is shared with the region.
		 */
		if (hem_list_is_bottom_bt(r->hopnum, 0)) {
			hem = hem_list_alloc_item(hr_dev, r->offset,
						  r->offset + r->count - 1,
						  r->count, false, 0);
			if (!hem) {
				ret = -ENOMEM;
				goto err_exit;
			}
			hem_list_assign_bt(hr_dev, hem, cpu_base, phy_base);
			list_add(&hem->list, &temp_list[i]);
			list_add(&hem->sibling, &temp_btm);
			total += r->count;
		} else {
			step = hem_list_calc_ba_range(r->hopnum, 1, unit);
			if (step < 1) {
				ret = -EINVAL;
				goto err_exit;
			}
			/* if mid bts exist, link L1 to L0 */
			list_for_each_entry_safe(hem, temp_hem,
					  &hem_list->mid_bt[i][1], list) {
				offset = hem->start / step * BA_BYTE_LEN;
				hem_list_link_bt(hr_dev, cpu_base + offset,
						 hem->dma_addr);
				total++;
			}
		}
	}

	list_splice(&temp_btm, &hem_list->btm_bt);
	list_splice(&temp_root, &hem_list->root_bt);
	for (i = 0; i < region_cnt; i++)
		list_splice(&temp_list[i], &hem_list->mid_bt[i][0]);

	return 0;

err_exit:
	for (i = 0; i < region_cnt; i++)
		hem_list_free_all(hr_dev, &temp_list[i], false);

	hem_list_free_all(hr_dev, &temp_root, true);

	return ret;
}


/* construct the base address table and link them by address hop config */
int hns_roce_hem_list_request(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_list *hem_list,
			      const struct hns_roce_buf_region *regions,
			      int region_cnt)
{
	const struct hns_roce_buf_region *r;
	int ofs, end;
	int ret = 0;
	int unit;
	int i;

	if (region_cnt > HNS_ROCE_MAX_BT_REGION) {
		dev_err(hr_dev->dev, "invalid region_cnt %d!\n",
			region_cnt);
		return -EINVAL;
	}

	unit = (1 << hem_list->bt_pg_shift) / BA_BYTE_LEN;
	for (i = 0; i < region_cnt; i++) {
		r = &regions[i];
		if (!r->count)
			continue;

		end = r->offset + r->count;
		for (ofs = r->offset; ofs < end; ofs += unit) {
			ret = hem_list_alloc_mid_bt(hr_dev, r, unit, ofs,
						    hem_list->mid_bt[i],
						    &hem_list->btm_bt);
			if (ret) {
				dev_err(hr_dev->dev,
					"alloc hem trunk failed, ret = %d!\n",
					ret);
				goto err_alloc;
			}
		}
	}

	ret = hem_list_alloc_root_bt(hr_dev, hem_list, unit, regions,
				     region_cnt);
	if (ret)
		dev_err(hr_dev->dev, "alloc hem root failed, ret = %d!\n",
			ret);
	else
		return 0;

err_alloc:
	hns_roce_hem_list_release(hr_dev, hem_list);

	return ret;
}

void hns_roce_hem_list_release(struct hns_roce_dev *hr_dev,
			       struct hns_roce_hem_list *hem_list)
{
	int i, j;

	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			hem_list_free_all(hr_dev, &hem_list->mid_bt[i][j],
					  j != 0);

	hem_list_free_all(hr_dev, &hem_list->root_bt, true);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	hem_list->root_ba = 0;
}

void hns_roce_hem_list_init(struct hns_roce_hem_list *hem_list,
			    int bt_page_order)
{
	int i, j;

	INIT_LIST_HEAD(&hem_list->root_bt);
	INIT_LIST_HEAD(&hem_list->btm_bt);
	for (i = 0; i < HNS_ROCE_MAX_BT_REGION; i++)
		for (j = 0; j < HNS_ROCE_MAX_BT_LEVEL; j++)
			INIT_LIST_HEAD(&hem_list->mid_bt[i][j]);

	hem_list->bt_pg_shift = bt_page_order;
}

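/*
 * Find the bottom-level BT entry for @offset: returns its CPU address and
 * reports via @mtt_cnt how many consecutive entries remain in that BT page
 * and via @phy_addr its DMA address.
 */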
void *hns_roce_hem_list_find_mtt(struct hns_roce_dev *hr_dev,
				 struct hns_roce_hem_list *hem_list,
				 int offset, int *mtt_cnt, u64 *phy_addr)
{
	struct list_head *head = &hem_list->btm_bt;
	struct roce_hem_item *hem, *temp_hem;
	void *cpu_base = NULL;
	u64 phy_base = 0;
	int nr = 0;

	list_for_each_entry_safe(hem, temp_hem, head, sibling) {
		if (hem_list_page_is_in_range(hem, offset)) {
			nr = offset - hem->start;
			cpu_base = hem->addr + nr * BA_BYTE_LEN;
			phy_base = hem->dma_addr + nr * BA_BYTE_LEN;
			nr = hem->end + 1 - offset;
			break;
		}
	}

	if (mtt_cnt)
		*mtt_cnt = nr;

	if (phy_addr)
		*phy_addr = phy_base;

	return cpu_base;
}