xref: /openbmc/linux/drivers/infiniband/hw/irdma/pble.c (revision 8dda2eac)
// SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
/* Copyright (c) 2015 - 2021 Intel Corporation */
#include "osdep.h"
#include "status.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);

/**
 * irdma_destroy_pble_prm - destroy prm during module unload
 * @pble_rsrc: pble resources
 */
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *) pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		if (chunk->bitmapbuf)
			kfree(chunk->bitmapmem.va);
		kfree(chunk->chunkmem.va);
	}
}

/**
 * irdma_hmc_init_pble - Initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */
enum irdma_status_code
irdma_hmc_init_pble(struct irdma_sc_dev *dev,
		    struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	enum irdma_status_code status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
	/* Start pbles on a 4K boundary */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
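	/*
	 * Each pble entry is 8 bytes, so the shifts by 3 above convert
	 * between a byte offset within the FPM region and the count of pble
	 * entries skipped to reach the next 4K-aligned address.
	 */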
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = IRDMA_ERR_NO_MEMORY;
	}

	return status;
}

/**
 * get_sd_pd_idx - Returns sd index, pd index and rel_pd_idx from fpm address
 * @pble_rsrc: structure containing fpm address
 * @idx: where to return indexes
 */
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
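	/*
	 * sd_idx selects the segment descriptor (direct backing-page sized
	 * unit, IRDMA_HMC_DIRECT_BP_SIZE) containing next_fpm_addr, pd_idx
	 * the 4K page descriptor within the whole FPM space, and rel_pd_idx
	 * the pd position inside that sd (IRDMA_HMC_PD_CNT_IN_SD pds per sd).
	 */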
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}

/**
 * add_sd_direct - add sd direct for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */
static enum irdma_status_code
add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	enum irdma_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

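	/*
	 * A direct sd is backed by one physically contiguous page, so the
	 * chunk simply maps into that backing page at the current pd offset.
	 */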
	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}

/**
 * fpm_to_idx - given fpm address, get pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address for index
 */
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}

/**
 * add_bp_pages - add backing pages for sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */
static enum irdma_status_code
add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
	     struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	enum irdma_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return IRDMA_ERR_NO_MEMORY;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

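	/*
	 * Walk the chunk's DMA pages and register each 4K page as a pd entry
	 * under the paged sd; pd entries that are already valid are skipped.
	 */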
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}

/**
 * irdma_get_type - get the sd entry type to use for an sd
 * @dev: irdma_sc_dev struct
 * @idx: index of sd
 * @pages: pages in the sd
 */
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

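	/*
	 * Use a DIRECT sd only when the allocation starts on an sd boundary
	 * and covers a full sd's worth of pd pages; otherwise use a PAGED sd.
	 */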
	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}

/**
 * add_pble_prm - add an sd entry for the pble resource
 * @pble_rsrc: pble resource management
 */
static enum irdma_status_code
add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum irdma_status_code ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

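	/*
	 * A new chunk needs at least one 4K page worth of unallocated pbles
	 * and must start on a 4K-aligned fpm address.
	 */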
	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return IRDMA_ERR_NO_MEMORY;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return IRDMA_ERR_INVALID_PAGE_DESC_INDEX;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return IRDMA_ERR_NO_MEMORY;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
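	/*
	 * pages is capped both by the pds left in the current sd and by how
	 * many whole 4K pages the remaining unallocated pbles can fill
	 * (PBLE_512_SHIFT converts a pble count into a page count).
	 */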
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
			     sd_entry->u.pd_table.pd_page_addr.pa :
			     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	sd_entry->valid = true;
	return 0;

error:
	if (chunk->bitmapbuf)
		kfree(chunk->bitmapmem.va);
	kfree(chunk->chunkmem.va);

	return ret_code;
}

/**
 * free_lvl2 - free level 2 pble
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

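	/*
	 * Leaves are allocated front to back, so the first leaf without an
	 * address marks the end of what needs to be returned to the prm.
	 */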
	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}

/**
 * get_lvl2_pble - get level 2 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */
static enum irdma_status_code
get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	enum irdma_status_code ret_code;
	u64 fpm_addr;

	/* number of full 512-entry (4K) leaves */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return IRDMA_ERR_NO_MEMORY;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
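	/*
	 * The root page holds one 8-byte pble index per leaf, hence
	 * total << 3 bytes are requested from the prm for it.
	 */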
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return IRDMA_ERR_NO_MEMORY;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
				lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return IRDMA_ERR_NO_MEMORY;
}

/**
 * get_lvl1_pble - get level 1 pble resource
 * @pble_rsrc: pble resource management
 * @palloc: level 1 pble allocation
 */
static enum irdma_status_code
get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
	      struct irdma_pble_alloc *palloc)
{
	enum irdma_status_code ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return IRDMA_ERR_NO_MEMORY;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}

/**
 * get_lvl1_lvl2_pble - calls get_lvl1 and get_lvl2 pble routine
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @level1_only: flag for a level 1 PBLE
 */
static enum irdma_status_code
get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, bool level1_only)
{
	enum irdma_status_code status = 0;

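	/*
	 * Try a single contiguous level-1 range first; fall back to a
	 * two-level structure only when that fails, more than one 4K page of
	 * pbles is needed, and the caller did not request level 1 only.
	 */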
	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}

/**
 * irdma_get_pble - allocate pbles from the prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble (idx + pble addr)
 * @pble_cnt: number of pbles requested
 * @level1_only: true if only pble level 1 to acquire
 */
enum irdma_status_code irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
				      struct irdma_pble_alloc *palloc,
				      u32 pble_cnt, bool level1_only)
{
	enum irdma_status_code status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);

	/* Check first to see if we can get pbles without acquiring
	 * additional sds
	 */
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
	if (!status)
		goto exit;

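	/*
	 * Not enough free pbles; grow the prm one sd at a time. A direct sd
	 * backs 2MB, i.e. 2^18 8-byte pbles, so total_cnt >> 18 (plus one for
	 * partial coverage) bounds how many sds are worth adding.
	 */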
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
		/* if level1_only, only go through it once */
		if (!status || level1_only)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}

/**
 * irdma_free_pble - put pbles back into prm
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding pble resource being freed
 */
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}
521