/*
 * Copyright (c) 2016 Hisilicon Limited.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_device.h"
#include "hns_roce_hem.h"
#include "hns_roce_common.h"

#define DMA_ADDR_T_SHIFT		12
#define BT_BA_SHIFT			32

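/*
 * A table uses multi-hop addressing when the firmware reports a non-zero
 * *_hop_num capability for it: instead of one flat array of HEM chunks,
 * the hardware walks one or more levels of Base Address Tables (BTs) to
 * reach the chunk holding a given context entry.
 */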
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type)
{
	if ((hr_dev->caps.qpc_hop_num && type == HEM_TYPE_QPC) ||
	    (hr_dev->caps.mpt_hop_num && type == HEM_TYPE_MTPT) ||
	    (hr_dev->caps.cqc_hop_num && type == HEM_TYPE_CQC) ||
	    (hr_dev->caps.srqc_hop_num && type == HEM_TYPE_SRQC) ||
	    (hr_dev->caps.cqe_hop_num && type == HEM_TYPE_CQE) ||
	    (hr_dev->caps.mtt_hop_num && type == HEM_TYPE_MTT) ||
	    (hr_dev->caps.srqwqe_hop_num && type == HEM_TYPE_SRQWQE) ||
	    (hr_dev->caps.idx_hop_num && type == HEM_TYPE_IDX))
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(hns_roce_check_whether_mhop);

static bool hns_roce_check_hem_null(struct hns_roce_hem **hem, u64 start_idx,
				    u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (hem[start_idx + i])
			return false;

	return true;
}

static bool hns_roce_check_bt_null(u64 **bt, u64 start_idx, u32 bt_chunk_num)
{
	int i;

	for (i = 0; i < bt_chunk_num; i++)
		if (bt[start_idx + i])
			return false;

	return true;
}

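/*
 * Number of base-address levels (counting the HEM chunk itself) that
 * @table_type uses with @hop_num hops.
 */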
static int hns_roce_get_bt_num(u32 table_type, u32 hop_num)
{
	if (check_whether_bt_num_3(table_type, hop_num))
		return 3;
	else if (check_whether_bt_num_2(table_type, hop_num))
		return 2;
	else if (check_whether_bt_num_1(table_type, hop_num))
		return 1;
	else
		return 0;
}

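/*
 * Decompose @obj into per-level indexes for a multi-hop table.  Each BT
 * chunk holds bt_chunk_size / 8 base addresses (8 bytes each); e.g. with
 * a hypothetical 4KB chunk, chunk_ba_num is 512, so for a three-level
 * table:
 *
 *	table_idx = (l0_idx * 512 + l1_idx) * 512 + l2_idx
 *
 * and the second switch below simply inverts that relation.
 */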
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
			   struct hns_roce_hem_table *table, unsigned long *obj,
			   struct hns_roce_hem_mhop *mhop)
{
	struct device *dev = hr_dev->dev;
	u32 chunk_ba_num;
	u32 table_idx;
	u32 bt_num;
	u32 chunk_size;

	switch (table->type) {
	case HEM_TYPE_QPC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.qpc_bt_num;
		mhop->hop_num = hr_dev->caps.qpc_hop_num;
		break;
	case HEM_TYPE_MTPT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.mpt_bt_num;
		mhop->hop_num = hr_dev->caps.mpt_hop_num;
		break;
	case HEM_TYPE_CQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.cqc_bt_num;
		mhop->hop_num = hr_dev->caps.cqc_hop_num;
		break;
	case HEM_TYPE_SRQC:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = hr_dev->caps.srqc_bt_num;
		mhop->hop_num = hr_dev->caps.srqc_hop_num;
		break;
	case HEM_TYPE_MTT:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.mtt_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.mtt_hop_num;
		break;
	case HEM_TYPE_CQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.cqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.cqe_hop_num;
		break;
	case HEM_TYPE_SRQWQE:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.srqwqe_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.srqwqe_hop_num;
		break;
	case HEM_TYPE_IDX:
		mhop->buf_chunk_size = 1 << (hr_dev->caps.idx_buf_pg_sz
					     + PAGE_SHIFT);
		mhop->bt_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					    + PAGE_SHIFT);
		mhop->ba_l0_num = mhop->bt_chunk_size / 8;
		mhop->hop_num = hr_dev->caps.idx_hop_num;
		break;
	default:
		dev_err(dev, "Table %d does not support multi-hop addressing!\n",
			table->type);
		return -EINVAL;
	}

	if (!obj)
		return 0;

	/*
	 * QPC/MTPT/CQC/SRQC tables allocate HEM for the buffer pages
	 * themselves; MTT/CQE tables allocate HEM for BT pages.
	 */
	bt_num = hns_roce_get_bt_num(table->type, mhop->hop_num);
	chunk_ba_num = mhop->bt_chunk_size / 8;
	chunk_size = table->type < HEM_TYPE_MTT ? mhop->buf_chunk_size :
			      mhop->bt_chunk_size;
	table_idx = (*obj & (table->num_obj - 1)) /
		     (chunk_size / table->obj_size);
	switch (bt_num) {
	case 3:
		mhop->l2_idx = table_idx & (chunk_ba_num - 1);
		mhop->l1_idx = table_idx / chunk_ba_num & (chunk_ba_num - 1);
		mhop->l0_idx = (table_idx / chunk_ba_num) / chunk_ba_num;
		break;
	case 2:
		mhop->l1_idx = table_idx & (chunk_ba_num - 1);
		mhop->l0_idx = table_idx / chunk_ba_num;
		break;
	case 1:
		mhop->l0_idx = table_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, mhop->hop_num);
		return -EINVAL;
	}
	if (mhop->l0_idx >= mhop->ba_l0_num)
		mhop->l0_idx %= mhop->ba_l0_num;

	return 0;
}
EXPORT_SYMBOL_GPL(hns_roce_calc_hem_mhop);

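/*
 * Allocate a HEM of @hem_alloc_size bytes as a list of DMA-coherent
 * chunks.  Each chunk records its allocations both as kernel virtual
 * addresses (chunk->buf[]) and as a scatterlist carrying the DMA
 * addresses and lengths (chunk->mem[]).
 */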
static struct hns_roce_hem *hns_roce_alloc_hem(struct hns_roce_dev *hr_dev,
					       int npages,
					       unsigned long hem_alloc_size,
					       gfp_t gfp_mask)
{
	struct hns_roce_hem_chunk *chunk = NULL;
	struct hns_roce_hem *hem;
	struct scatterlist *mem;
	int order;
	void *buf;

	WARN_ON(gfp_mask & __GFP_HIGHMEM);

	hem = kmalloc(sizeof(*hem),
		      gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!hem)
		return NULL;

	hem->refcount = 0;
	INIT_LIST_HEAD(&hem->chunk_list);

	order = get_order(hem_alloc_size);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof(*chunk),
				gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, HNS_ROCE_HEM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg = 0;
			memset(chunk->buf, 0, sizeof(chunk->buf));
			list_add_tail(&chunk->list, &hem->chunk_list);
		}

		while (1 << order > npages)
			--order;

		/*
		 * Allocate the memory in one shot.  If that fails, do not
		 * fall back to smaller allocations; just return failure.
		 */
		mem = &chunk->mem[chunk->npages];
		buf = dma_alloc_coherent(hr_dev->dev, PAGE_SIZE << order,
					 &sg_dma_address(mem), gfp_mask);
		if (!buf)
			goto fail;

		chunk->buf[chunk->npages] = buf;
		sg_dma_len(mem) = PAGE_SIZE << order;

		++chunk->npages;
		++chunk->nsg;
		npages -= 1 << order;
	}

	return hem;

fail:
	hns_roce_free_hem(hr_dev, hem);
	return NULL;
}

void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem)
{
	struct hns_roce_hem_chunk *chunk, *tmp;
	int i;

	if (!hem)
		return;

	list_for_each_entry_safe(chunk, tmp, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i)
			dma_free_coherent(hr_dev->dev,
					  sg_dma_len(&chunk->mem[i]),
					  chunk->buf[i],
					  sg_dma_address(&chunk->mem[i]));
		kfree(chunk);
	}

	kfree(hem);
}

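/*
 * Program the base address of a HEM chunk into the hardware BT/BA
 * command registers: poll until any previous HW sync has completed,
 * then write the low and high halves of the page-shifted DMA address
 * together with the table type and object index in one 64-bit write.
 */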
static int hns_roce_set_hem(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, unsigned long obj)
{
	spinlock_t *lock = &hr_dev->bt_cmd_lock;
	struct device *dev = hr_dev->dev;
	unsigned long end = 0;
	unsigned long flags;
	struct hns_roce_hem_iter iter;
	void __iomem *bt_cmd;
	u32 bt_cmd_h_val = 0;
	u32 bt_cmd_val[2];
	u32 bt_cmd_l = 0;
	u64 bt_ba = 0;
	int ret = 0;

	/* Find the HEM (Hardware Entry Memory) entry */
	unsigned long i = (obj & (table->num_obj - 1)) /
			  (table->table_chunk_size / table->obj_size);

	switch (table->type) {
	case HEM_TYPE_QPC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_QPC);
		break;
	case HEM_TYPE_MTPT:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_MTPT);
		break;
	case HEM_TYPE_CQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, HEM_TYPE_CQC);
		break;
	case HEM_TYPE_SRQC:
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S,
			       HEM_TYPE_SRQC);
		break;
	default:
		return ret;
	}
	roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M,
		       ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0);
	roce_set_bit(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1);

	/* Currently iterate over only one chunk */
	for (hns_roce_hem_first(table->hem[i], &iter);
	     !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) {
		bt_ba = hns_roce_hem_addr(&iter) >> DMA_ADDR_T_SHIFT;

		spin_lock_irqsave(lock, flags);

		bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG;

		end = msecs_to_jiffies(HW_SYNC_TIMEOUT_MSECS) + jiffies;
		while (1) {
			if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) {
				if (!(time_before(jiffies, end))) {
					dev_err(dev, "Write bt_cmd err, hw_sync is not zero.\n");
					spin_unlock_irqrestore(lock, flags);
					return -EBUSY;
				}
			} else {
				break;
			}
			mdelay(HW_SYNC_SLEEP_TIME_INTERVAL);
		}

		bt_cmd_l = (u32)bt_ba;
		roce_set_field(bt_cmd_h_val, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M,
			       ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S,
			       bt_ba >> BT_BA_SHIFT);

		bt_cmd_val[0] = bt_cmd_l;
		bt_cmd_val[1] = bt_cmd_h_val;
		hns_roce_write64_k(bt_cmd_val,
				   hr_dev->reg_base + ROCEE_BT_CMD_L_REG);
		spin_unlock_irqrestore(lock, flags);
	}

	return ret;
}

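/*
 * Take a reference on the HEM chunk backing @obj in a multi-hop table,
 * allocating any missing L0/L1 BT chunks and the HEM chunk itself on
 * first use.  The flat chunk index follows the same decomposition as
 * hns_roce_calc_hem_mhop(), e.g. for three levels:
 * hem_idx = (l0_idx * chunk_ba_num + l1_idx) * chunk_ba_num + l2_idx.
 */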
static int hns_roce_table_mhop_get(struct hns_roce_dev *hr_dev,
				   struct hns_roce_hem_table *table,
				   unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem_iter iter;
	u32 buf_chunk_size;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 size;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	u64 bt_l0_idx = 0;
	u64 bt_ba;
	unsigned long mhop_obj = obj;
	int bt_l1_allocated = 0;
	int bt_l0_allocated = 0;
	int step_idx;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return ret;

	buf_chunk_size = mhop.buf_chunk_size;
	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		bt_l0_idx = mhop.l0_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return -EINVAL;
	}

	mutex_lock(&table->mutex);

	if (table->hem[hem_idx]) {
		++table->hem[hem_idx]->refcount;
		goto out;
	}

	/* allocate the L0 BT chunk, which holds the L1 base addresses */
	if ((check_whether_bt_num_3(table->type, hop_num) ||
	     check_whether_bt_num_2(table->type, hop_num)) &&
	     !table->bt_l0[bt_l0_idx]) {
		table->bt_l0[bt_l0_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l0_dma_addr[bt_l0_idx]),
					    GFP_KERNEL);
		if (!table->bt_l0[bt_l0_idx]) {
			ret = -ENOMEM;
			goto out;
		}
		bt_l0_allocated = 1;

		/* set base address to hardware */
		if (table->type < HEM_TYPE_MTT) {
			step_idx = 0;
			if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
				ret = -ENODEV;
				dev_err(dev, "set HEM base address to HW failed!\n");
				goto err_dma_alloc_l1;
			}
		}
	}

	/* allocate the L1 BT chunk, which holds the L2 base addresses */
	if (check_whether_bt_num_3(table->type, hop_num) &&
	    !table->bt_l1[bt_l1_idx]) {
		table->bt_l1[bt_l1_idx] = dma_alloc_coherent(dev, bt_chunk_size,
					    &(table->bt_l1_dma_addr[bt_l1_idx]),
					    GFP_KERNEL);
		if (!table->bt_l1[bt_l1_idx]) {
			ret = -ENOMEM;
			goto err_dma_alloc_l1;
		}
		bt_l1_allocated = 1;
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) =
					       table->bt_l1_dma_addr[bt_l1_idx];

		/* set base address to hardware */
		step_idx = 1;
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_alloc_hem_buf;
		}
	}

	/*
	 * Allocate a buffer space chunk for QPC/MTPT/CQC/SRQC, or a BT
	 * space chunk for MTT/CQE.
	 */
	size = table->type < HEM_TYPE_MTT ? buf_chunk_size : bt_chunk_size;
	table->hem[hem_idx] = hns_roce_alloc_hem(hr_dev,
						size >> PAGE_SHIFT,
						size,
						(table->lowmem ? GFP_KERNEL :
						GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[hem_idx]) {
		ret = -ENOMEM;
		goto err_alloc_hem_buf;
	}

	hns_roce_hem_first(table->hem[hem_idx], &iter);
	bt_ba = hns_roce_hem_addr(&iter);

	if (table->type < HEM_TYPE_MTT) {
		if (hop_num == 2) {
			*(table->bt_l1[bt_l1_idx] + mhop.l2_idx) = bt_ba;
			step_idx = 2;
		} else if (hop_num == 1) {
			*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
			step_idx = 1;
		} else if (hop_num == HNS_ROCE_HOP_NUM_0) {
			step_idx = 0;
		} else {
			ret = -EINVAL;
			goto err_alloc_hem;
		}

		/* set HEM base address to hardware */
		if (hr_dev->hw->set_hem(hr_dev, table, obj, step_idx)) {
			ret = -ENODEV;
			dev_err(dev, "set HEM base address to HW failed!\n");
			goto err_alloc_hem;
		}
	} else if (hop_num == 2) {
		*(table->bt_l0[bt_l0_idx] + mhop.l1_idx) = bt_ba;
	}

	++table->hem[hem_idx]->refcount;
	goto out;

err_alloc_hem:
	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
	table->hem[hem_idx] = NULL;

err_alloc_hem_buf:
	if (bt_l1_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l1[bt_l1_idx],
				  table->bt_l1_dma_addr[bt_l1_idx]);
		table->bt_l1[bt_l1_idx] = NULL;
	}

err_dma_alloc_l1:
	if (bt_l0_allocated) {
		dma_free_coherent(dev, bt_chunk_size, table->bt_l0[bt_l0_idx],
				  table->bt_l0_dma_addr[bt_l0_idx]);
		table->bt_l0[bt_l0_idx] = NULL;
	}

out:
	mutex_unlock(&table->mutex);
	return ret;
}

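/*
 * Take a reference on the HEM chunk backing @obj, allocating the chunk
 * and programming its base address into hardware on first use.
 */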
int hns_roce_table_get(struct hns_roce_dev *hr_dev,
		       struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	int ret = 0;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type))
		return hns_roce_table_mhop_get(hr_dev, table, obj);

	i = (obj & (table->num_obj - 1)) / (table->table_chunk_size /
	     table->obj_size);

	mutex_lock(&table->mutex);

	if (table->hem[i]) {
		++table->hem[i]->refcount;
		goto out;
	}

	table->hem[i] = hns_roce_alloc_hem(hr_dev,
				       table->table_chunk_size >> PAGE_SHIFT,
				       table->table_chunk_size,
				       (table->lowmem ? GFP_KERNEL :
					GFP_HIGHUSER) | __GFP_NOWARN);
	if (!table->hem[i]) {
		ret = -ENOMEM;
		goto out;
	}

	/* Set the HEM base address (physical, 128K per chunk) in hardware */
	if (hns_roce_set_hem(hr_dev, table, obj)) {
		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
		ret = -ENODEV;
		dev_err(dev, "set HEM base address to HW failed.\n");
		goto out;
	}

	++table->hem[i]->refcount;
out:
	mutex_unlock(&table->mutex);
	return ret;
}

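/*
 * Drop a reference on the HEM chunk backing @obj in a multi-hop table
 * (unconditionally when @check_refcount is 0).  Once the chunk is gone,
 * any L1/L0 BT chunk whose entries have all become empty is freed as
 * well, after clearing the corresponding base address in hardware.
 */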
static void hns_roce_table_mhop_put(struct hns_roce_dev *hr_dev,
				    struct hns_roce_hem_table *table,
				    unsigned long obj,
				    int check_refcount)
{
	struct device *dev = hr_dev->dev;
	struct hns_roce_hem_mhop mhop;
	unsigned long mhop_obj = obj;
	u32 bt_chunk_size;
	u32 chunk_ba_num;
	u32 hop_num;
	u32 start_idx;
	u32 bt_num;
	u64 hem_idx;
	u64 bt_l1_idx = 0;
	int ret;

	ret = hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
	if (ret)
		return;

	bt_chunk_size = mhop.bt_chunk_size;
	hop_num = mhop.hop_num;
	chunk_ba_num = bt_chunk_size / 8;

	bt_num = hns_roce_get_bt_num(table->type, hop_num);
	switch (bt_num) {
	case 3:
		hem_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			  mhop.l1_idx * chunk_ba_num + mhop.l2_idx;
		bt_l1_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 2:
		hem_idx = mhop.l0_idx * chunk_ba_num + mhop.l1_idx;
		break;
	case 1:
		hem_idx = mhop.l0_idx;
		break;
	default:
		dev_err(dev, "Table %d does not support hop_num = %d!\n",
			table->type, hop_num);
		return;
	}

	mutex_lock(&table->mutex);

	if (check_refcount && (--table->hem[hem_idx]->refcount > 0)) {
		mutex_unlock(&table->mutex);
		return;
	}

	if (table->type < HEM_TYPE_MTT && hop_num == 1) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT && hop_num == 2) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 2))
			dev_warn(dev, "Clear HEM base address failed.\n");
	} else if (table->type < HEM_TYPE_MTT &&
		   hop_num == HNS_ROCE_HOP_NUM_0) {
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");
	}

	/*
	 * Free the buffer space chunk for QPC/MTPT/CQC/SRQC, or the BT
	 * space chunk for MTT/CQE.
	 */
	hns_roce_free_hem(hr_dev, table->hem[hem_idx]);
	table->hem[hem_idx] = NULL;

	if (check_whether_bt_num_2(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (table->type < HEM_TYPE_MTT &&
			    hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l0[mhop.l0_idx],
					  table->bt_l0_dma_addr[mhop.l0_idx]);
			table->bt_l0[mhop.l0_idx] = NULL;
		}
	} else if (check_whether_bt_num_3(table->type, hop_num)) {
		start_idx = mhop.l0_idx * chunk_ba_num * chunk_ba_num +
			    mhop.l1_idx * chunk_ba_num;
		if (hns_roce_check_hem_null(table->hem, start_idx,
					    chunk_ba_num)) {
			if (hr_dev->hw->clear_hem(hr_dev, table, obj, 1))
				dev_warn(dev, "Clear HEM base address failed.\n");

			dma_free_coherent(dev, bt_chunk_size,
					  table->bt_l1[bt_l1_idx],
					  table->bt_l1_dma_addr[bt_l1_idx]);
			table->bt_l1[bt_l1_idx] = NULL;

			start_idx = mhop.l0_idx * chunk_ba_num;
			if (hns_roce_check_bt_null(table->bt_l1, start_idx,
						   chunk_ba_num)) {
				if (hr_dev->hw->clear_hem(hr_dev, table, obj,
							  0))
					dev_warn(dev, "Clear HEM base address failed.\n");

				dma_free_coherent(dev, bt_chunk_size,
					    table->bt_l0[mhop.l0_idx],
					    table->bt_l0_dma_addr[mhop.l0_idx]);
				table->bt_l0[mhop.l0_idx] = NULL;
			}
		}
	}

	mutex_unlock(&table->mutex);
}

void hns_roce_table_put(struct hns_roce_dev *hr_dev,
			struct hns_roce_hem_table *table, unsigned long obj)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_table_mhop_put(hr_dev, table, obj, 1);
		return;
	}

	i = (obj & (table->num_obj - 1)) /
	    (table->table_chunk_size / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->hem[i]->refcount == 0) {
		/* Clear HEM base address */
		if (hr_dev->hw->clear_hem(hr_dev, table, obj, 0))
			dev_warn(dev, "Clear HEM base address failed.\n");

		hns_roce_free_hem(hr_dev, table->hem[i]);
		table->hem[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

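/*
 * Return the kernel virtual address of the entry for @obj and,
 * optionally, its DMA address through @dma_handle.  Only lowmem tables
 * can be searched, since highmem chunks have no permanent kernel
 * mapping.
 */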
void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
			  struct hns_roce_hem_table *table,
			  unsigned long obj, dma_addr_t *dma_handle)
{
	struct hns_roce_hem_chunk *chunk;
	struct hns_roce_hem_mhop mhop;
	struct hns_roce_hem *hem;
	void *addr = NULL;
	unsigned long mhop_obj = obj;
	unsigned long obj_per_chunk;
	unsigned long idx_offset;
	int offset, dma_offset;
	int length;
	int i, j;
	u32 hem_idx = 0;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	if (!hns_roce_check_whether_mhop(hr_dev, table->type)) {
		obj_per_chunk = table->table_chunk_size / table->obj_size;
		hem = table->hem[(obj & (table->num_obj - 1)) / obj_per_chunk];
		idx_offset = (obj & (table->num_obj - 1)) % obj_per_chunk;
		dma_offset = offset = idx_offset * table->obj_size;
	} else {
		hns_roce_calc_hem_mhop(hr_dev, table, &mhop_obj, &mhop);
		/* mtt-style multi-hop table */
		i = mhop.l0_idx;
		j = mhop.l1_idx;
		if (mhop.hop_num == 2)
			hem_idx = i * (mhop.bt_chunk_size / 8) + j;
		else if (mhop.hop_num == 1 ||
			 mhop.hop_num == HNS_ROCE_HOP_NUM_0)
			hem_idx = i;

		hem = table->hem[hem_idx];
		dma_offset = offset = (obj & (table->num_obj - 1)) *
				       table->obj_size % mhop.bt_chunk_size;
		if (mhop.hop_num == 2)
			dma_offset = offset = 0;
	}

	if (!hem)
		goto out;

	list_for_each_entry(chunk, &hem->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			length = sg_dma_len(&chunk->mem[i]);
			if (dma_handle && dma_offset >= 0) {
				if (length > (u32)dma_offset)
					*dma_handle = sg_dma_address(
						&chunk->mem[i]) + dma_offset;
				dma_offset -= length;
			}

			if (length > (u32)offset) {
				addr = chunk->buf[i] + offset;
				goto out;
			}
			offset -= length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return addr;
}
EXPORT_SYMBOL_GPL(hns_roce_table_find);

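/*
 * Reserve HEM for all objects in [start, end].  The stride is one
 * chunk's worth of objects, so each backing chunk is referenced exactly
 * once; on failure the chunks already taken are released again.
 */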
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
			     struct hns_roce_hem_table *table,
			     unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;
	int ret;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	/* Allocate MTT entry memory chunk by chunk (128K per chunk) */
	for (i = start; i <= end; i += inc) {
		ret = hns_roce_table_get(hr_dev, table, i);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		hns_roce_table_put(hr_dev, table, i);
	}
	return ret;
}

void hns_roce_table_put_range(struct hns_roce_dev *hr_dev,
			      struct hns_roce_hem_table *table,
			      unsigned long start, unsigned long end)
{
	struct hns_roce_hem_mhop mhop;
	unsigned long inc = table->table_chunk_size / table->obj_size;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
		inc = mhop.bt_chunk_size / table->obj_size;
	}

	for (i = start; i <= end; i += inc)
		hns_roce_table_put(hr_dev, table, i);
}

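/*
 * Set up the bookkeeping for a HEM table: the chunk pointer array for
 * single-hop tables, plus the L0 (and, for three-level tables, L1) BT
 * pointer and DMA-address arrays for multi-hop tables.  @use_lowmem
 * selects GFP_KERNEL rather than GFP_HIGHUSER chunks.
 */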
int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
			    struct hns_roce_hem_table *table, u32 type,
			    unsigned long obj_size, unsigned long nobj,
			    int use_lowmem)
{
	struct device *dev = hr_dev->dev;
	unsigned long obj_per_chunk;
	unsigned long num_hem;

	if (!hns_roce_check_whether_mhop(hr_dev, type)) {
		table->table_chunk_size = hr_dev->caps.chunk_sz;
		obj_per_chunk = table->table_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;

		table->hem = kcalloc(num_hem, sizeof(*table->hem), GFP_KERNEL);
		if (!table->hem)
			return -ENOMEM;
	} else {
		unsigned long buf_chunk_size;
		unsigned long bt_chunk_size;
		unsigned long bt_chunk_num;
		unsigned long num_bt_l0 = 0;
		u32 hop_num;

		switch (type) {
		case HEM_TYPE_QPC:
			buf_chunk_size = 1 << (hr_dev->caps.qpc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.qpc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.qpc_bt_num;
			hop_num = hr_dev->caps.qpc_hop_num;
			break;
		case HEM_TYPE_MTPT:
			buf_chunk_size = 1 << (hr_dev->caps.mpt_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.mpt_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.mpt_bt_num;
			hop_num = hr_dev->caps.mpt_hop_num;
			break;
		case HEM_TYPE_CQC:
			buf_chunk_size = 1 << (hr_dev->caps.cqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.cqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.cqc_bt_num;
			hop_num = hr_dev->caps.cqc_hop_num;
			break;
		case HEM_TYPE_SRQC:
			buf_chunk_size = 1 << (hr_dev->caps.srqc_buf_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = 1 << (hr_dev->caps.srqc_ba_pg_sz
					+ PAGE_SHIFT);
			num_bt_l0 = hr_dev->caps.srqc_bt_num;
			hop_num = hr_dev->caps.srqc_hop_num;
			break;
		case HEM_TYPE_MTT:
			buf_chunk_size = 1 << (hr_dev->caps.mtt_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.mtt_hop_num;
			break;
		case HEM_TYPE_CQE:
			buf_chunk_size = 1 << (hr_dev->caps.cqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.cqe_hop_num;
			break;
		case HEM_TYPE_SRQWQE:
			buf_chunk_size = 1 << (hr_dev->caps.srqwqe_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.srqwqe_hop_num;
			break;
		case HEM_TYPE_IDX:
			buf_chunk_size = 1 << (hr_dev->caps.idx_ba_pg_sz
					+ PAGE_SHIFT);
			bt_chunk_size = buf_chunk_size;
			hop_num = hr_dev->caps.idx_hop_num;
			break;
		default:
			dev_err(dev,
				"Table %d does not support hem table init here!\n",
				type);
			return -EINVAL;
		}
		obj_per_chunk = buf_chunk_size / obj_size;
		num_hem = (nobj + obj_per_chunk - 1) / obj_per_chunk;
		bt_chunk_num = bt_chunk_size / 8;
		if (type >= HEM_TYPE_MTT)
			num_bt_l0 = bt_chunk_num;

		table->hem = kcalloc(num_hem, sizeof(*table->hem),
				     GFP_KERNEL);
		if (!table->hem)
			goto err_kcalloc_hem_buf;

		if (check_whether_bt_num_3(type, hop_num)) {
			unsigned long num_bt_l1;

			num_bt_l1 = (num_hem + bt_chunk_num - 1) /
					     bt_chunk_num;
			table->bt_l1 = kcalloc(num_bt_l1,
					       sizeof(*table->bt_l1),
					       GFP_KERNEL);
			if (!table->bt_l1)
				goto err_kcalloc_bt_l1;

			table->bt_l1_dma_addr = kcalloc(num_bt_l1,
						 sizeof(*table->bt_l1_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l1_dma_addr)
				goto err_kcalloc_l1_dma;
		}

		if (check_whether_bt_num_2(type, hop_num) ||
		    check_whether_bt_num_3(type, hop_num)) {
			table->bt_l0 = kcalloc(num_bt_l0, sizeof(*table->bt_l0),
					       GFP_KERNEL);
			if (!table->bt_l0)
				goto err_kcalloc_bt_l0;

			table->bt_l0_dma_addr = kcalloc(num_bt_l0,
						 sizeof(*table->bt_l0_dma_addr),
						 GFP_KERNEL);
			if (!table->bt_l0_dma_addr)
				goto err_kcalloc_l0_dma;
		}
	}

	table->type = type;
	table->num_hem = num_hem;
	table->num_obj = nobj;
	table->obj_size = obj_size;
	table->lowmem = use_lowmem;
	mutex_init(&table->mutex);

	return 0;

err_kcalloc_l0_dma:
	kfree(table->bt_l0);
	table->bt_l0 = NULL;

err_kcalloc_bt_l0:
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;

err_kcalloc_l1_dma:
	kfree(table->bt_l1);
	table->bt_l1 = NULL;

err_kcalloc_bt_l1:
	kfree(table->hem);
	table->hem = NULL;

err_kcalloc_hem_buf:
	return -ENOMEM;
}

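/*
 * Tear down a multi-hop table: put every remaining chunk without
 * reference counting (the table is going away), then free the BT
 * bookkeeping arrays.
 */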
static void hns_roce_cleanup_mhop_hem_table(struct hns_roce_dev *hr_dev,
					    struct hns_roce_hem_table *table)
{
	struct hns_roce_hem_mhop mhop;
	u32 buf_chunk_size;
	int i;
	u64 obj;

	hns_roce_calc_hem_mhop(hr_dev, table, NULL, &mhop);
	buf_chunk_size = table->type < HEM_TYPE_MTT ? mhop.buf_chunk_size :
					mhop.bt_chunk_size;

	for (i = 0; i < table->num_hem; ++i) {
		obj = i * buf_chunk_size / table->obj_size;
		if (table->hem[i])
			hns_roce_table_mhop_put(hr_dev, table, obj, 0);
	}

	kfree(table->hem);
	table->hem = NULL;
	kfree(table->bt_l1);
	table->bt_l1 = NULL;
	kfree(table->bt_l1_dma_addr);
	table->bt_l1_dma_addr = NULL;
	kfree(table->bt_l0);
	table->bt_l0 = NULL;
	kfree(table->bt_l0_dma_addr);
	table->bt_l0_dma_addr = NULL;
}

void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
				struct hns_roce_hem_table *table)
{
	struct device *dev = hr_dev->dev;
	unsigned long i;

	if (hns_roce_check_whether_mhop(hr_dev, table->type)) {
		hns_roce_cleanup_mhop_hem_table(hr_dev, table);
		return;
	}

	for (i = 0; i < table->num_hem; ++i)
		if (table->hem[i]) {
			if (hr_dev->hw->clear_hem(hr_dev, table,
			    i * table->table_chunk_size / table->obj_size, 0))
				dev_err(dev, "Clear HEM base address failed.\n");

			hns_roce_free_hem(hr_dev, table->hem[i]);
		}

	kfree(table->hem);
}

void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev)
{
	if (hr_dev->caps.num_idx_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_idx_table);
	if (hr_dev->caps.num_srqwqe_segs)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_srqwqe_table);
	if (hr_dev->caps.srqc_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->srq_table.table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->cq_table.table);
	if (hr_dev->caps.trrl_entry_sz)
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->qp_table.trrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.irrl_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->qp_table.qp_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtpt_table);
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		hns_roce_cleanup_hem_table(hr_dev,
					   &hr_dev->mr_table.mtt_cqe_table);
	hns_roce_cleanup_hem_table(hr_dev, &hr_dev->mr_table.mtt_table);
}