xref: /openbmc/linux/drivers/accel/habanalabs/common/memory.c (revision 25ebbc57ca56df3cf9149e9da6b1d3169c8487db)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #include <uapi/drm/habanalabs_accel.h>
9 #include "habanalabs.h"
10 #include "../include/hw_ip/mmu/mmu_general.h"
11 
12 #include <linux/uaccess.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/pci-p2pdma.h>
16 
17 MODULE_IMPORT_NS(DMA_BUF);
18 
19 #define HL_MMU_DEBUG	0
20 
21 /* use small pages for supporting non-pow2 (32M/40M/48M) DRAM phys page sizes */
22 #define DRAM_POOL_PAGE_SIZE	SZ_8M
23 
24 #define MEM_HANDLE_INVALID	ULONG_MAX
25 
26 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv,
27 			struct hl_mem_in *args, u64 *handle);
28 
29 static int set_alloc_page_size(struct hl_device *hdev, struct hl_mem_in *args, u32 *page_size)
30 {
31 	struct asic_fixed_properties *prop = &hdev->asic_prop;
32 	u64 psize;
33 
34 	/*
35 	 * For an ASIC that supports setting the allocation page size by the user, honor the
36 	 * user's choice only if it is not 0 (0 means use the default page size).
37 	 */
38 	if (prop->supports_user_set_page_size && args->alloc.page_size) {
39 		psize = args->alloc.page_size;
40 
41 		if (!is_power_of_2(psize)) {
42 			dev_err(hdev->dev, "user page size (%#llx) is not power of 2\n", psize);
43 			return -EINVAL;
44 		}
45 	} else {
46 		psize = prop->device_mem_alloc_default_page_size;
47 	}
48 
49 	*page_size = psize;
50 
51 	return 0;
52 }
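
/*
 * Illustrative example (values are hypothetical, not tied to a specific ASIC):
 * with supports_user_set_page_size set, a user page_size of 0 selects
 * device_mem_alloc_default_page_size, SZ_2M is accepted as-is, and any
 * non-power-of-2 user value is rejected with -EINVAL.
 */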
53 
54 /*
55  * The va ranges in context object contain a list with the available chunks of
56  * device virtual memory.
57  * There is one range for host allocations and one for DRAM allocations.
58  *
59  * On initialization each range contains one chunk of all of its available
60  * virtual range which is a half of the total device virtual range.
61  *
62  * On each mapping of physical pages, a suitable virtual range chunk (with a
63  * minimum size) is selected from the list. If the chunk size equals the
64  * requested size, the chunk is returned. Otherwise, the chunk is split into
65  * two chunks - one to return as result and a remainder to stay in the list.
66  *
67  * On each unmapping of a virtual address, the relevant virtual chunk is
68  * returned to the list. The chunk is added to the list and, if its edges match
69  * the edges of the adjacent chunks (meaning a contiguous chunk can be created),
70  * the chunks are merged.
71  *
72  * On finish, the list is checked to contain only one chunk covering the whole
73  * relevant virtual range (which is half of the device's total virtual range).
74  * If not (meaning not all mappings were unmapped), a warning is printed.
75  */
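
/*
 * Illustrative example (addresses are made up): starting from a single free
 * chunk [0x1000, 0x8FFF], mapping 0x2000 bytes returns 0x1000 and leaves
 * [0x3000, 0x8FFF] in the list. Unmapping those 0x2000 bytes later re-adds
 * [0x1000, 0x2FFF], whose end borders the remaining chunk's start, so the two
 * chunks are merged back into [0x1000, 0x8FFF].
 */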
76 
77 /*
78  * alloc_device_memory() - allocate device memory.
79  * @ctx: pointer to the context structure.
80  * @args: host parameters containing the requested size.
81  * @ret_handle: result handle.
82  *
83  * This function does the following:
84  * - Allocate the requested size rounded up to the allocation page size.
85  * - Return unique handle for later map/unmap/free.
86  */
87 static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args,
88 				u32 *ret_handle)
89 {
90 	struct hl_device *hdev = ctx->hdev;
91 	struct hl_vm *vm = &hdev->vm;
92 	struct hl_vm_phys_pg_pack *phys_pg_pack;
93 	u64 paddr = 0, total_size, num_pgs, i;
94 	u32 num_curr_pgs, page_size;
95 	bool contiguous;
96 	int handle, rc;
97 
98 	num_curr_pgs = 0;
99 
100 	rc = set_alloc_page_size(hdev, args, &page_size);
101 	if (rc)
102 		return rc;
103 
104 	num_pgs = DIV_ROUND_UP_ULL(args->alloc.mem_size, page_size);
105 	total_size = num_pgs * page_size;
106 
107 	if (!total_size) {
108 		dev_err(hdev->dev, "Cannot allocate 0 bytes\n");
109 		return -EINVAL;
110 	}
111 
112 	contiguous = args->flags & HL_MEM_CONTIGUOUS;
113 
114 	if (contiguous) {
115 		if (is_power_of_2(page_size))
116 			paddr = (uintptr_t) gen_pool_dma_alloc_align(vm->dram_pg_pool,
117 								     total_size, NULL, page_size);
118 		else
119 			paddr = gen_pool_alloc(vm->dram_pg_pool, total_size);
120 		if (!paddr) {
121 			dev_err(hdev->dev,
122 				"Cannot allocate %llu contiguous pages with total size of %llu\n",
123 				num_pgs, total_size);
124 			return -ENOMEM;
125 		}
126 	}
127 
128 	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
129 	if (!phys_pg_pack) {
130 		rc = -ENOMEM;
131 		goto pages_pack_err;
132 	}
133 
134 	phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK;
135 	phys_pg_pack->asid = ctx->asid;
136 	phys_pg_pack->npages = num_pgs;
137 	phys_pg_pack->page_size = page_size;
138 	phys_pg_pack->total_size = total_size;
139 	phys_pg_pack->flags = args->flags;
140 	phys_pg_pack->contiguous = contiguous;
141 
142 	phys_pg_pack->pages = kvmalloc_array(num_pgs, sizeof(u64), GFP_KERNEL);
143 	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
144 		rc = -ENOMEM;
145 		goto pages_arr_err;
146 	}
147 
148 	if (phys_pg_pack->contiguous) {
149 		for (i = 0 ; i < num_pgs ; i++)
150 			phys_pg_pack->pages[i] = paddr + i * page_size;
151 	} else {
152 		for (i = 0 ; i < num_pgs ; i++) {
153 			if (is_power_of_2(page_size))
154 				phys_pg_pack->pages[i] =
155 					(uintptr_t)gen_pool_dma_alloc_align(vm->dram_pg_pool,
156 									    page_size, NULL,
157 									    page_size);
158 			else
159 				phys_pg_pack->pages[i] = gen_pool_alloc(vm->dram_pg_pool,
160 									page_size);
161 
162 			if (!phys_pg_pack->pages[i]) {
163 				dev_err(hdev->dev,
164 					"Cannot allocate device memory (out of memory)\n");
165 				rc = -ENOMEM;
166 				goto page_err;
167 			}
168 
169 			num_curr_pgs++;
170 		}
171 	}
172 
173 	spin_lock(&vm->idr_lock);
174 	handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0,
175 				GFP_ATOMIC);
176 	spin_unlock(&vm->idr_lock);
177 
178 	if (handle < 0) {
179 		dev_err(hdev->dev, "Failed to get handle for page\n");
180 		rc = -EFAULT;
181 		goto idr_err;
182 	}
183 
184 	for (i = 0 ; i < num_pgs ; i++)
185 		kref_get(&vm->dram_pg_pool_refcount);
186 
187 	phys_pg_pack->handle = handle;
188 
189 	atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem);
190 	atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem);
191 
192 	*ret_handle = handle;
193 
194 	return 0;
195 
196 idr_err:
197 page_err:
198 	if (!phys_pg_pack->contiguous)
199 		for (i = 0 ; i < num_curr_pgs ; i++)
200 			gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i],
201 					page_size);
202 
203 	kvfree(phys_pg_pack->pages);
204 pages_arr_err:
205 	kfree(phys_pg_pack);
206 pages_pack_err:
207 	if (contiguous)
208 		gen_pool_free(vm->dram_pg_pool, paddr, total_size);
209 
210 	return rc;
211 }
212 
213 /**
214  * dma_map_host_va() - DMA mapping of the given host virtual address.
215  * @hdev: habanalabs device structure.
216  * @addr: the host virtual address of the memory area.
217  * @size: the size of the memory area.
218  * @p_userptr: pointer to result userptr structure.
219  *
220  * This function does the following:
221  * - Allocate userptr structure.
222  * - Pin the given host memory using the userptr structure.
223  * - Perform DMA mapping to have the DMA addresses of the pages.
224  */
225 static int dma_map_host_va(struct hl_device *hdev, u64 addr, u64 size,
226 				struct hl_userptr **p_userptr)
227 {
228 	struct hl_userptr *userptr;
229 	int rc;
230 
231 	userptr = kzalloc(sizeof(*userptr), GFP_KERNEL);
232 	if (!userptr) {
233 		rc = -ENOMEM;
234 		goto userptr_err;
235 	}
236 
237 	rc = hl_pin_host_memory(hdev, addr, size, userptr);
238 	if (rc)
239 		goto pin_err;
240 
241 	userptr->dma_mapped = true;
242 	userptr->dir = DMA_BIDIRECTIONAL;
243 	userptr->vm_type = VM_TYPE_USERPTR;
244 
245 	*p_userptr = userptr;
246 
247 	rc = hdev->asic_funcs->asic_dma_map_sgtable(hdev, userptr->sgt, DMA_BIDIRECTIONAL);
248 	if (rc) {
249 		dev_err(hdev->dev, "failed to map sgt with DMA region\n");
250 		goto dma_map_err;
251 	}
252 
253 	return 0;
254 
255 dma_map_err:
256 	hl_unpin_host_memory(hdev, userptr);
257 pin_err:
258 	kfree(userptr);
259 userptr_err:
260 
261 	return rc;
262 }
263 
264 /**
265  * dma_unmap_host_va() - DMA unmapping of the given host virtual address.
266  * @hdev: habanalabs device structure.
267  * @userptr: userptr to free.
268  *
269  * This function does the following:
270  * - Unpins the physical pages.
271  * - Frees the userptr structure.
272  */
273 static void dma_unmap_host_va(struct hl_device *hdev,
274 				struct hl_userptr *userptr)
275 {
276 	hl_unpin_host_memory(hdev, userptr);
277 	kfree(userptr);
278 }
279 
280 /**
281  * dram_pg_pool_do_release() - free DRAM pages pool
282  * @ref: pointer to reference object.
283  *
284  * This function does the following:
285  * - Frees the idr structure of physical pages handles.
286  * - Frees the generic pool of DRAM physical pages.
287  */
288 static void dram_pg_pool_do_release(struct kref *ref)
289 {
290 	struct hl_vm *vm = container_of(ref, struct hl_vm,
291 			dram_pg_pool_refcount);
292 
293 	/*
294 	 * Free the idr here, as only here do we know for sure that there are no
295 	 * allocated physical pages and hence no handles in use.
296 	 */
297 	idr_destroy(&vm->phys_pg_pack_handles);
298 	gen_pool_destroy(vm->dram_pg_pool);
299 }
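
/*
 * Note on the refcount pairing: alloc_device_memory() takes one reference on
 * dram_pg_pool_refcount per allocated page and free_phys_pg_pack() puts one
 * per page, so this release callback runs only once the last DRAM page (and
 * the pool's initial reference, dropped at VM teardown) is gone.
 */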
300 
301 /**
302  * free_phys_pg_pack() - free physical page pack.
303  * @hdev: habanalabs device structure.
304  * @phys_pg_pack: physical page pack to free.
305  *
306  * This function does the following:
307  * - For DRAM memory only
308  *   - iterate over the pack, free each physical block structure by
309  *     returning it to the general pool.
310  * - Free the hl_vm_phys_pg_pack structure.
311  */
312 static void free_phys_pg_pack(struct hl_device *hdev,
313 				struct hl_vm_phys_pg_pack *phys_pg_pack)
314 {
315 	struct hl_vm *vm = &hdev->vm;
316 	u64 i;
317 
318 	if (phys_pg_pack->created_from_userptr)
319 		goto end;
320 
321 	if (phys_pg_pack->contiguous) {
322 		gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0],
323 			phys_pg_pack->total_size);
324 
325 		for (i = 0; i < phys_pg_pack->npages ; i++)
326 			kref_put(&vm->dram_pg_pool_refcount,
327 				dram_pg_pool_do_release);
328 	} else {
329 		for (i = 0 ; i < phys_pg_pack->npages ; i++) {
330 			gen_pool_free(vm->dram_pg_pool,
331 				phys_pg_pack->pages[i],
332 				phys_pg_pack->page_size);
333 			kref_put(&vm->dram_pg_pool_refcount,
334 				dram_pg_pool_do_release);
335 		}
336 	}
337 
338 end:
339 	kvfree(phys_pg_pack->pages);
340 	kfree(phys_pg_pack);
341 
342 	return;
343 }
344 
345 /**
346  * free_device_memory() - free device memory.
347  * @ctx: pointer to the context structure.
348  * @args: host parameters containing the handle of the memory to free.
349  *
350  * This function does the following:
351  * - Free the device memory related to the given handle.
352  */
353 static int free_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args)
354 {
355 	struct hl_device *hdev = ctx->hdev;
356 	struct hl_vm *vm = &hdev->vm;
357 	struct hl_vm_phys_pg_pack *phys_pg_pack;
358 	u32 handle = args->free.handle;
359 
360 	spin_lock(&vm->idr_lock);
361 	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
362 	if (!phys_pg_pack) {
363 		spin_unlock(&vm->idr_lock);
364 		dev_err(hdev->dev, "free device memory failed, no match for handle %u\n", handle);
365 		return -EINVAL;
366 	}
367 
368 	if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) {
369 		spin_unlock(&vm->idr_lock);
370 		dev_err(hdev->dev, "handle %u is mapped, cannot free\n", handle);
371 		return -EINVAL;
372 	}
373 
374 	/* Must remove from the idr before freeing the physical pages, as releasing the pool's
375 	 * refcount is also what triggers the idr destroy.
376 	 */
377 	idr_remove(&vm->phys_pg_pack_handles, handle);
378 	spin_unlock(&vm->idr_lock);
379 
380 	atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem);
381 	atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem);
382 
383 	free_phys_pg_pack(hdev, phys_pg_pack);
384 
385 	return 0;
386 }
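
/*
 * Compile-only usage sketch; the function below is purely an example and not
 * part of the driver flow (in practice these helpers are reached through the
 * memory IOCTL). It shows how a handle returned by alloc_device_memory() is
 * later passed to free_device_memory(). The chosen values are arbitrary.
 */
static int __maybe_unused example_alloc_then_free(struct hl_ctx *ctx, u64 size)
{
	struct hl_mem_in args = {};
	u32 handle;
	int rc;

	args.alloc.mem_size = size;	/* rounded up to the allocation page size */
	args.alloc.page_size = 0;	/* 0 selects the default page size */
	args.flags = HL_MEM_CONTIGUOUS;	/* ask for one contiguous block */

	rc = alloc_device_memory(ctx, &args, &handle);
	if (rc)
		return rc;

	args.free.handle = handle;
	return free_device_memory(ctx, &args);
}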
387 
388 /**
389  * clear_va_list_locked() - free virtual addresses list.
390  * @hdev: habanalabs device structure.
391  * @va_list: list of virtual addresses to free.
392  *
393  * This function does the following:
394  * - Iterate over the list and free each virtual address block.
395  *
396  * This function should be called only when va_list lock is taken.
397  */
398 static void clear_va_list_locked(struct hl_device *hdev,
399 		struct list_head *va_list)
400 {
401 	struct hl_vm_va_block *va_block, *tmp;
402 
403 	list_for_each_entry_safe(va_block, tmp, va_list, node) {
404 		list_del(&va_block->node);
405 		kfree(va_block);
406 	}
407 }
408 
409 /**
410  * print_va_list_locked() - print virtual addresses list.
411  * @hdev: habanalabs device structure.
412  * @va_list: list of virtual addresses to print.
413  *
414  * This function does the following:
415  * - Iterate over the list and print each virtual address block.
416  *
417  * This function should be called only when va_list lock is taken.
418  */
419 static void print_va_list_locked(struct hl_device *hdev,
420 		struct list_head *va_list)
421 {
422 #if HL_MMU_DEBUG
423 	struct hl_vm_va_block *va_block;
424 
425 	dev_dbg(hdev->dev, "print va list:\n");
426 
427 	list_for_each_entry(va_block, va_list, node)
428 		dev_dbg(hdev->dev,
429 			"va block, start: 0x%llx, end: 0x%llx, size: %llu\n",
430 			va_block->start, va_block->end, va_block->size);
431 #endif
432 }
433 
434 /**
435  * merge_va_blocks_locked() - merge a virtual block if possible.
436  * @hdev: pointer to the habanalabs device structure.
437  * @va_list: pointer to the virtual addresses block list.
438  * @va_block: virtual block to merge with adjacent blocks.
439  *
440  * This function does the following:
441  * - Merge the given block with the adjacent blocks if their virtual ranges
442  *   create a contiguous virtual range.
443  *
444  * This function should be called only when the va_list lock is taken.
445  */
446 static void merge_va_blocks_locked(struct hl_device *hdev,
447 		struct list_head *va_list, struct hl_vm_va_block *va_block)
448 {
449 	struct hl_vm_va_block *prev, *next;
450 
451 	prev = list_prev_entry(va_block, node);
452 	if (&prev->node != va_list && prev->end + 1 == va_block->start) {
453 		prev->end = va_block->end;
454 		prev->size = prev->end - prev->start + 1;
455 		list_del(&va_block->node);
456 		kfree(va_block);
457 		va_block = prev;
458 	}
459 
460 	next = list_next_entry(va_block, node);
461 	if (&next->node != va_list && va_block->end + 1 == next->start) {
462 		next->start = va_block->start;
463 		next->size = next->end - next->start + 1;
464 		list_del(&va_block->node);
465 		kfree(va_block);
466 	}
467 }
468 
469 /**
470  * add_va_block_locked() - add a virtual block to the virtual addresses list.
471  * @hdev: pointer to the habanalabs device structure.
472  * @va_list: pointer to the virtual addresses block list.
473  * @start: start virtual address.
474  * @end: end virtual address.
475  *
476  * This function does the following:
477  * - Add the given block to the virtual blocks list and merge with other blocks
478  *   if a contiguous virtual block can be created.
479  *
480  * This function should be called only when the va_list lock is taken.
481  */
482 static int add_va_block_locked(struct hl_device *hdev,
483 		struct list_head *va_list, u64 start, u64 end)
484 {
485 	struct hl_vm_va_block *va_block, *res = NULL;
486 	u64 size = end - start + 1;
487 
488 	print_va_list_locked(hdev, va_list);
489 
490 	list_for_each_entry(va_block, va_list, node) {
491 		/* TODO: remove once the code is mature */
492 		if (hl_mem_area_crosses_range(start, size, va_block->start,
493 				va_block->end)) {
494 			dev_err(hdev->dev,
495 				"block crossing ranges at start 0x%llx, end 0x%llx\n",
496 				va_block->start, va_block->end);
497 			return -EINVAL;
498 		}
499 
500 		if (va_block->end < start)
501 			res = va_block;
502 	}
503 
504 	va_block = kmalloc(sizeof(*va_block), GFP_KERNEL);
505 	if (!va_block)
506 		return -ENOMEM;
507 
508 	va_block->start = start;
509 	va_block->end = end;
510 	va_block->size = size;
511 
512 	if (!res)
513 		list_add(&va_block->node, va_list);
514 	else
515 		list_add(&va_block->node, &res->node);
516 
517 	merge_va_blocks_locked(hdev, va_list, va_block);
518 
519 	print_va_list_locked(hdev, va_list);
520 
521 	return 0;
522 }
523 
524 /**
525  * add_va_block() - wrapper for add_va_block_locked.
526  * @hdev: pointer to the habanalabs device structure.
527  * @va_range: pointer to the virtual addresses range object.
528  * @start: start virtual address.
529  * @end: end virtual address.
530  *
531  * This function does the following:
532  * - Takes the list lock and calls add_va_block_locked.
533  */
534 static inline int add_va_block(struct hl_device *hdev,
535 		struct hl_va_range *va_range, u64 start, u64 end)
536 {
537 	int rc;
538 
539 	mutex_lock(&va_range->lock);
540 	rc = add_va_block_locked(hdev, &va_range->list, start, end);
541 	mutex_unlock(&va_range->lock);
542 
543 	return rc;
544 }
545 
546 /**
547  * is_hint_crossing_range() - check if the hint address crosses the specified reserved range.
548  * @range_type: virtual space range type.
549  * @start_addr: start virtual address.
550  * @size: block size.
551  * @prop: asic properties structure to retrieve reserved ranges from.
552  */
553 static inline bool is_hint_crossing_range(enum hl_va_range_type range_type,
554 		u64 start_addr, u32 size, struct asic_fixed_properties *prop) {
555 	bool range_cross;
556 
557 	if (range_type == HL_VA_RANGE_TYPE_DRAM)
558 		range_cross =
559 			hl_mem_area_crosses_range(start_addr, size,
560 			prop->hints_dram_reserved_va_range.start_addr,
561 			prop->hints_dram_reserved_va_range.end_addr);
562 	else if (range_type == HL_VA_RANGE_TYPE_HOST)
563 		range_cross =
564 			hl_mem_area_crosses_range(start_addr, size,
565 			prop->hints_host_reserved_va_range.start_addr,
566 			prop->hints_host_reserved_va_range.end_addr);
567 	else
568 		range_cross =
569 			hl_mem_area_crosses_range(start_addr, size,
570 			prop->hints_host_hpage_reserved_va_range.start_addr,
571 			prop->hints_host_hpage_reserved_va_range.end_addr);
572 
573 	return range_cross;
574 }
575 
576 /**
577  * get_va_block() - get a virtual block for the given size and alignment.
578  *
579  * @hdev: pointer to the habanalabs device structure.
580  * @va_range: pointer to the virtual addresses range.
581  * @size: requested block size.
582  * @hint_addr: hint for requested address by the user.
583  * @va_block_align: required alignment of the virtual block start address.
584  * @range_type: va range type (host, dram)
585  * @flags: additional memory flags, currently only uses HL_MEM_FORCE_HINT
586  *
587  * This function does the following:
588  * - Iterate on the virtual block list to find a suitable virtual block for the
589  *   given size, hint address and alignment.
590  * - Reserve the requested block and update the list.
591  * - Return the start address of the virtual block.
592  */
593 static u64 get_va_block(struct hl_device *hdev,
594 				struct hl_va_range *va_range,
595 				u64 size, u64 hint_addr, u32 va_block_align,
596 				enum hl_va_range_type range_type,
597 				u32 flags)
598 {
599 	struct hl_vm_va_block *va_block, *new_va_block = NULL;
600 	struct asic_fixed_properties *prop = &hdev->asic_prop;
601 	u64 tmp_hint_addr, valid_start, valid_size, prev_start, prev_end,
602 		align_mask, reserved_valid_start = 0, reserved_valid_size = 0,
603 		dram_hint_mask = prop->dram_hints_align_mask;
604 	bool add_prev = false;
605 	bool is_align_pow_2 = is_power_of_2(va_range->page_size);
606 	bool is_hint_dram_addr = hl_is_dram_va(hdev, hint_addr);
607 	bool force_hint = flags & HL_MEM_FORCE_HINT;
608 
609 	if (is_align_pow_2)
610 		align_mask = ~((u64)va_block_align - 1);
611 	else
612 		/*
613 		 * with non-power-of-2 range we work only with page granularity
614 		 * and the start address is page aligned,
615 		 * so no need for alignment checking.
616 		 */
617 		size = DIV_ROUND_UP_ULL(size, va_range->page_size) *
618 							va_range->page_size;
619 
620 	tmp_hint_addr = hint_addr & ~dram_hint_mask;
621 
622 	/* Check if we need to ignore hint address */
623 	if ((is_align_pow_2 && (hint_addr & (va_block_align - 1))) ||
624 			(!is_align_pow_2 && is_hint_dram_addr &&
625 			do_div(tmp_hint_addr, va_range->page_size))) {
626 
627 		if (force_hint) {
628 			/* Hint must be respected, so here we just fail */
629 			dev_err(hdev->dev,
630 				"Hint address 0x%llx is not page aligned - cannot be respected\n",
631 				hint_addr);
632 			return 0;
633 		}
634 
635 		dev_dbg(hdev->dev,
636 			"Hint address 0x%llx will be ignored because it is not aligned\n",
637 			hint_addr);
638 		hint_addr = 0;
639 	}
640 
641 	mutex_lock(&va_range->lock);
642 
643 	print_va_list_locked(hdev, &va_range->list);
644 
645 	list_for_each_entry(va_block, &va_range->list, node) {
646 		/* Calc the first possible aligned addr */
647 		valid_start = va_block->start;
648 
649 		if (is_align_pow_2 && (valid_start & (va_block_align - 1))) {
650 			valid_start &= align_mask;
651 			valid_start += va_block_align;
652 			if (valid_start > va_block->end)
653 				continue;
654 		}
655 
656 		valid_size = va_block->end - valid_start + 1;
657 		if (valid_size < size)
658 			continue;
659 
660 		/*
661 		 * If the hint address is 0 and the hints_range_reservation
662 		 * property is enabled, avoid allocating va blocks from the
663 		 * range reserved for hint addresses.
664 		 */
665 		if (prop->hints_range_reservation && !hint_addr)
666 			if (is_hint_crossing_range(range_type, valid_start,
667 					size, prop))
668 				continue;
669 
670 		/* Pick the minimal length block which has the required size */
671 		if (!new_va_block || (valid_size < reserved_valid_size)) {
672 			new_va_block = va_block;
673 			reserved_valid_start = valid_start;
674 			reserved_valid_size = valid_size;
675 		}
676 
677 		if (hint_addr && hint_addr >= valid_start &&
678 					(hint_addr + size) <= va_block->end) {
679 			new_va_block = va_block;
680 			reserved_valid_start = hint_addr;
681 			reserved_valid_size = valid_size;
682 			break;
683 		}
684 	}
685 
686 	if (!new_va_block) {
687 		dev_err(hdev->dev, "no available va block for size %llu\n",
688 								size);
689 		goto out;
690 	}
691 
692 	if (force_hint && reserved_valid_start != hint_addr) {
693 		/* Hint address must be respected. If we are here - this means
694 		 * we could not respect it.
695 		 */
696 		dev_err(hdev->dev,
697 			"Hint address 0x%llx could not be respected\n",
698 			hint_addr);
699 		reserved_valid_start = 0;
700 		goto out;
701 	}
702 
703 	/*
704 	 * Check if there is some leftover range due to reserving the new
705 	 * va block, then return it to the main virtual addresses list.
706 	 */
707 	if (reserved_valid_start > new_va_block->start) {
708 		prev_start = new_va_block->start;
709 		prev_end = reserved_valid_start - 1;
710 
711 		new_va_block->start = reserved_valid_start;
712 		new_va_block->size = reserved_valid_size;
713 
714 		add_prev = true;
715 	}
716 
717 	if (new_va_block->size > size) {
718 		new_va_block->start += size;
719 		new_va_block->size = new_va_block->end - new_va_block->start + 1;
720 	} else {
721 		list_del(&new_va_block->node);
722 		kfree(new_va_block);
723 	}
724 
725 	if (add_prev)
726 		add_va_block_locked(hdev, &va_range->list, prev_start,
727 				prev_end);
728 
729 	print_va_list_locked(hdev, &va_range->list);
730 out:
731 	mutex_unlock(&va_range->lock);
732 
733 	return reserved_valid_start;
734 }
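
/*
 * Illustrative example (made-up addresses, power-of-2 alignment): with a
 * single free block [0x1000, 0x8FFF], a request for 0x2000 bytes with hint
 * 0x3000 returns 0x3000; the leftover [0x1000, 0x2FFF] is re-added to the
 * list and the picked block shrinks to [0x5000, 0x8FFF].
 */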
735 
736 /*
737  * hl_reserve_va_block() - reserve a virtual block of a given size.
738  * @hdev: pointer to the habanalabs device structure.
739  * @ctx: current context
740  * @type: virtual addresses range type.
741  * @size: requested block size.
742  * @alignment: required alignment in bytes of the virtual block start address,
743  *             0 means no alignment.
744  *
745  * This function does the following:
746  * - Iterate on the virtual block list to find a suitable virtual block for the
747  *   given size and alignment.
748  * - Reserve the requested block and update the list.
749  * - Return the start address of the virtual block.
750  */
751 u64 hl_reserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
752 		enum hl_va_range_type type, u64 size, u32 alignment)
753 {
754 	return get_va_block(hdev, ctx->va_range[type], size, 0,
755 			max(alignment, ctx->va_range[type]->page_size),
756 			type, 0);
757 }
758 
759 /**
760  * hl_get_va_range_type() - get va_range type for the given address and size.
761  * @ctx: context to fetch va_range from.
762  * @address: the start address of the area we want to validate.
763  * @size: the size in bytes of the area we want to validate.
764  * @type: returned va_range type.
765  *
766  * Return: 0 if the area is inside a valid range, -EINVAL otherwise.
767  */
768 static int hl_get_va_range_type(struct hl_ctx *ctx, u64 address, u64 size,
769 			enum hl_va_range_type *type)
770 {
771 	int i;
772 
773 	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX; i++) {
774 		if (hl_mem_area_inside_range(address, size,
775 				ctx->va_range[i]->start_addr,
776 				ctx->va_range[i]->end_addr)) {
777 			*type = i;
778 			return 0;
779 		}
780 	}
781 
782 	return -EINVAL;
783 }
784 
785 /**
786  * hl_unreserve_va_block() - wrapper for add_va_block to unreserve a va block.
787  * @hdev: pointer to the habanalabs device structure
788  * @ctx: pointer to the context structure.
789  * @start_addr: start virtual address.
790  * @size: number of bytes to unreserve.
791  *
792  * This function does the following:
793  * - Takes the list lock and calls add_va_block_locked.
794  */
795 int hl_unreserve_va_block(struct hl_device *hdev, struct hl_ctx *ctx,
796 		u64 start_addr, u64 size)
797 {
798 	enum hl_va_range_type type;
799 	int rc;
800 
801 	rc = hl_get_va_range_type(ctx, start_addr, size, &type);
802 	if (rc) {
803 		dev_err(hdev->dev,
804 			"cannot find va_range for va %#llx size %llu",
805 			start_addr, size);
806 		return rc;
807 	}
808 
809 	rc = add_va_block(hdev, ctx->va_range[type], start_addr,
810 						start_addr + size - 1);
811 	if (rc)
812 		dev_warn(hdev->dev,
813 			"add va block failed for vaddr: 0x%llx\n", start_addr);
814 
815 	return rc;
816 }
817 
818 /**
819  * init_phys_pg_pack_from_userptr() - initialize physical page pack from host
820  *                                    memory
821  * @ctx: pointer to the context structure.
822  * @userptr: userptr to initialize from.
823  * @pphys_pg_pack: result pointer.
824  * @force_regular_page: tell the function to ignore huge page optimization,
825  *                      even if possible. Needed for cases where the device VA
826  *                      is allocated before we know the composition of the
827  *                      physical pages
828  *
829  * This function does the following:
830  * - Pin the physical pages related to the given virtual block.
831  * - Create a physical page pack from the physical pages related to the given
832  *   virtual block.
833  */
834 static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx,
835 				struct hl_userptr *userptr,
836 				struct hl_vm_phys_pg_pack **pphys_pg_pack,
837 				bool force_regular_page)
838 {
839 	u32 npages, page_size = PAGE_SIZE,
840 		huge_page_size = ctx->hdev->asic_prop.pmmu_huge.page_size;
841 	u32 pgs_in_huge_page = huge_page_size >> __ffs(page_size);
842 	struct hl_vm_phys_pg_pack *phys_pg_pack;
843 	bool first = true, is_huge_page_opt;
844 	u64 page_mask, total_npages;
845 	struct scatterlist *sg;
846 	dma_addr_t dma_addr;
847 	int rc, i, j;
848 
849 	phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL);
850 	if (!phys_pg_pack)
851 		return -ENOMEM;
852 
853 	phys_pg_pack->vm_type = userptr->vm_type;
854 	phys_pg_pack->created_from_userptr = true;
855 	phys_pg_pack->asid = ctx->asid;
856 	atomic_set(&phys_pg_pack->mapping_cnt, 1);
857 
858 	is_huge_page_opt = !force_regular_page;
859 
860 	/* Only if all dma_addrs are aligned to 2MB and their
861 	 * sizes are at least 2MB can we use huge page mapping.
862 	 * We limit the 2MB optimization to this condition,
863 	 * since later on we acquire the related VA range as one
864 	 * consecutive block.
865 	 */
866 	total_npages = 0;
867 	for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
868 		npages = hl_get_sg_info(sg, &dma_addr);
869 
870 		total_npages += npages;
871 
872 		if ((npages % pgs_in_huge_page) ||
873 					(dma_addr & (huge_page_size - 1)))
874 			is_huge_page_opt = false;
875 	}
876 
877 	if (is_huge_page_opt) {
878 		page_size = huge_page_size;
879 		do_div(total_npages, pgs_in_huge_page);
880 	}
881 
882 	page_mask = ~(((u64) page_size) - 1);
883 
884 	phys_pg_pack->pages = kvmalloc_array(total_npages, sizeof(u64),
885 						GFP_KERNEL);
886 	if (ZERO_OR_NULL_PTR(phys_pg_pack->pages)) {
887 		rc = -ENOMEM;
888 		goto page_pack_arr_mem_err;
889 	}
890 
891 	phys_pg_pack->npages = total_npages;
892 	phys_pg_pack->page_size = page_size;
893 	phys_pg_pack->total_size = total_npages * page_size;
894 
895 	j = 0;
896 	for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
897 		npages = hl_get_sg_info(sg, &dma_addr);
898 
899 		/* align down to physical page size and save the offset */
900 		if (first) {
901 			first = false;
902 			phys_pg_pack->offset = dma_addr & (page_size - 1);
903 			dma_addr &= page_mask;
904 		}
905 
906 		while (npages) {
907 			phys_pg_pack->pages[j++] = dma_addr;
908 			dma_addr += page_size;
909 
910 			if (is_huge_page_opt)
911 				npages -= pgs_in_huge_page;
912 			else
913 				npages--;
914 		}
915 	}
916 
917 	*pphys_pg_pack = phys_pg_pack;
918 
919 	return 0;
920 
921 page_pack_arr_mem_err:
922 	kfree(phys_pg_pack);
923 
924 	return rc;
925 }
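
/*
 * Illustrative example, assuming a 4KB regular page and a 2MB huge page: a
 * pinned 4MB host buffer whose DMA chunks are all 2MB-aligned and whose sizes
 * are multiples of 2MB is packed as two 2MB pages instead of 1024 regular
 * pages, and can later be mapped through the huge-page host VA range.
 */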
926 
927 /**
928  * map_phys_pg_pack() - maps the physical page pack.
929  * @ctx: pointer to the context structure.
930  * @vaddr: start address of the virtual area to map from.
931  * @phys_pg_pack: the pack of physical pages to map to.
932  *
933  * This function does the following:
934  * - Maps each chunk of virtual memory to a matching physical chunk.
935  * - On failure, unmaps the pages that were already mapped.
936  * - Returns 0 on success, error code otherwise.
937  */
938 static int map_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
939 				struct hl_vm_phys_pg_pack *phys_pg_pack)
940 {
941 	struct hl_device *hdev = ctx->hdev;
942 	u64 next_vaddr = vaddr, paddr, mapped_pg_cnt = 0, i;
943 	u32 page_size = phys_pg_pack->page_size;
944 	int rc = 0;
945 	bool is_host_addr;
946 
947 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
948 		paddr = phys_pg_pack->pages[i];
949 
950 		rc = hl_mmu_map_page(ctx, next_vaddr, paddr, page_size,
951 				(i + 1) == phys_pg_pack->npages);
952 		if (rc) {
953 			dev_err(hdev->dev,
954 				"map failed for handle %u, npages: %llu, mapped: %llu",
955 				phys_pg_pack->handle, phys_pg_pack->npages,
956 				mapped_pg_cnt);
957 			goto err;
958 		}
959 
960 		mapped_pg_cnt++;
961 		next_vaddr += page_size;
962 	}
963 
964 	return 0;
965 
966 err:
967 	is_host_addr = !hl_is_dram_va(hdev, vaddr);
968 
969 	next_vaddr = vaddr;
970 	for (i = 0 ; i < mapped_pg_cnt ; i++) {
971 		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
972 					(i + 1) == mapped_pg_cnt))
973 			dev_warn_ratelimited(hdev->dev,
974 				"failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n",
975 					phys_pg_pack->handle, next_vaddr,
976 					phys_pg_pack->pages[i], page_size);
977 
978 		next_vaddr += page_size;
979 
980 		/*
981 		 * Unmapping on Palladium can be really long, so avoid a CPU
982 		 * soft lockup bug by sleeping a little between unmapping pages.
983 		 *
984 		 * In addition, the number of host pages can be huge because
985 		 * the page size could be 4KB, so when unmapping host pages
986 		 * sleep every 32K pages to avoid a soft lockup.
987 		 */
988 		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
989 			usleep_range(50, 200);
990 	}
991 
992 	return rc;
993 }
994 
995 /**
996  * unmap_phys_pg_pack() - unmaps the physical page pack.
997  * @ctx: pointer to the context structure.
998  * @vaddr: start address of the virtual area to unmap.
999  * @phys_pg_pack: the pack of physical pages to unmap.
1000  */
1001 static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr,
1002 				struct hl_vm_phys_pg_pack *phys_pg_pack)
1003 {
1004 	struct hl_device *hdev = ctx->hdev;
1005 	u64 next_vaddr, i;
1006 	bool is_host_addr;
1007 	u32 page_size;
1008 
1009 	is_host_addr = !hl_is_dram_va(hdev, vaddr);
1010 	page_size = phys_pg_pack->page_size;
1011 	next_vaddr = vaddr;
1012 
1013 	for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) {
1014 		if (hl_mmu_unmap_page(ctx, next_vaddr, page_size,
1015 				       (i + 1) == phys_pg_pack->npages))
1016 			dev_warn_ratelimited(hdev->dev,
1017 			"unmap failed for vaddr: 0x%llx\n", next_vaddr);
1018 
1019 		/*
1020 		 * Unmapping on Palladium can be really long, so avoid a CPU
1021 		 * soft lockup bug by sleeping a little between unmapping pages.
1022 		 *
1023 		 * In addition, the number of host pages can be huge because
1024 		 * the page size could be 4KB, so when unmapping host pages
1025 		 * sleep every 32K pages to avoid a soft lockup.
1026 		 */
1027 		if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0))
1028 			usleep_range(50, 200);
1029 	}
1030 }
1031 
1032 static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args,
1033 					u64 *paddr)
1034 {
1035 	struct hl_device *hdev = ctx->hdev;
1036 	struct hl_vm *vm = &hdev->vm;
1037 	struct hl_vm_phys_pg_pack *phys_pg_pack;
1038 	u32 handle;
1039 
1040 	handle = lower_32_bits(args->map_device.handle);
1041 	spin_lock(&vm->idr_lock);
1042 	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1043 	if (!phys_pg_pack) {
1044 		spin_unlock(&vm->idr_lock);
1045 		dev_err(hdev->dev, "no match for handle %u\n", handle);
1046 		return -EINVAL;
1047 	}
1048 
1049 	*paddr = phys_pg_pack->pages[0];
1050 
1051 	spin_unlock(&vm->idr_lock);
1052 
1053 	return 0;
1054 }
1055 
1056 /**
1057  * map_device_va() - map the given memory.
1058  * @ctx: pointer to the context structure.
1059  * @args: host parameters with handle/host virtual address.
1060  * @device_addr: pointer to result device virtual address.
1061  *
1062  * This function does the following:
1063  * - If given a physical device memory handle, map to a device virtual block
1064  *   and return the start address of this block.
1065  * - If given a host virtual address and size, find the related physical pages,
1066  *   map a device virtual block to these pages and return the start address of
1067  *   this block.
1068  */
1069 static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, u64 *device_addr)
1070 {
1071 	struct hl_vm_phys_pg_pack *phys_pg_pack;
1072 	enum hl_va_range_type va_range_type = 0;
1073 	struct hl_device *hdev = ctx->hdev;
1074 	struct hl_userptr *userptr = NULL;
1075 	u32 handle = 0, va_block_align;
1076 	struct hl_vm_hash_node *hnode;
1077 	struct hl_vm *vm = &hdev->vm;
1078 	struct hl_va_range *va_range;
1079 	bool is_userptr, do_prefetch;
1080 	u64 ret_vaddr, hint_addr;
1081 	enum vm_type *vm_type;
1082 	int rc;
1083 
1084 	/* set map flags */
1085 	is_userptr = args->flags & HL_MEM_USERPTR;
1086 	do_prefetch = hdev->supports_mmu_prefetch && (args->flags & HL_MEM_PREFETCH);
1087 
1088 	/* Assume failure */
1089 	*device_addr = 0;
1090 
1091 	if (is_userptr) {
1092 		u64 addr = args->map_host.host_virt_addr,
1093 			size = args->map_host.mem_size;
1094 		u32 page_size = hdev->asic_prop.pmmu.page_size,
1095 			huge_page_size = hdev->asic_prop.pmmu_huge.page_size;
1096 
1097 		rc = dma_map_host_va(hdev, addr, size, &userptr);
1098 		if (rc)
1099 			return rc;
1100 
1101 		rc = init_phys_pg_pack_from_userptr(ctx, userptr,
1102 				&phys_pg_pack, false);
1103 		if (rc) {
1104 			dev_err(hdev->dev,
1105 				"unable to init page pack for vaddr 0x%llx\n",
1106 				addr);
1107 			goto init_page_pack_err;
1108 		}
1109 
1110 		vm_type = (enum vm_type *) userptr;
1111 		hint_addr = args->map_host.hint_addr;
1112 		handle = phys_pg_pack->handle;
1113 
1114 		/* get required alignment */
1115 		if (phys_pg_pack->page_size == page_size) {
1116 			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1117 			va_range_type = HL_VA_RANGE_TYPE_HOST;
1118 			/*
1119 			 * huge page alignment may be needed in case of regular
1120 			 * page mapping, depending on the host VA alignment
1121 			 */
1122 			if (addr & (huge_page_size - 1))
1123 				va_block_align = page_size;
1124 			else
1125 				va_block_align = huge_page_size;
1126 		} else {
1127 			/*
1128 			 * huge page alignment is needed in case of huge page
1129 			 * mapping
1130 			 */
1131 			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1132 			va_range_type = HL_VA_RANGE_TYPE_HOST_HUGE;
1133 			va_block_align = huge_page_size;
1134 		}
1135 	} else {
1136 		handle = lower_32_bits(args->map_device.handle);
1137 
1138 		spin_lock(&vm->idr_lock);
1139 		phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle);
1140 		if (!phys_pg_pack) {
1141 			spin_unlock(&vm->idr_lock);
1142 			dev_err(hdev->dev,
1143 				"no match for handle %u\n", handle);
1144 			return -EINVAL;
1145 		}
1146 
1147 		/* increment now to avoid freeing device memory while mapping */
1148 		atomic_inc(&phys_pg_pack->mapping_cnt);
1149 
1150 		spin_unlock(&vm->idr_lock);
1151 
1152 		vm_type = (enum vm_type *) phys_pg_pack;
1153 
1154 		hint_addr = args->map_device.hint_addr;
1155 
1156 		/* DRAM VA alignment is the same as the MMU page size */
1157 		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1158 		va_range_type = HL_VA_RANGE_TYPE_DRAM;
1159 		va_block_align = hdev->asic_prop.dmmu.page_size;
1160 	}
1161 
1162 	/*
1163 	 * relevant for mapping device physical memory only, as host memory is
1164 	 * implicitly shared
1165 	 */
1166 	if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) &&
1167 			phys_pg_pack->asid != ctx->asid) {
1168 		dev_err(hdev->dev,
1169 			"Failed to map memory, handle %u is not shared\n",
1170 			handle);
1171 		rc = -EPERM;
1172 		goto shared_err;
1173 	}
1174 
1175 	hnode = kzalloc(sizeof(*hnode), GFP_KERNEL);
1176 	if (!hnode) {
1177 		rc = -ENOMEM;
1178 		goto hnode_err;
1179 	}
1180 
1181 	if (hint_addr && phys_pg_pack->offset) {
1182 		if (args->flags & HL_MEM_FORCE_HINT) {
1183 			/* Fail if hint must be respected but it can't be */
1184 			dev_err(hdev->dev,
1185 				"Hint address 0x%llx cannot be respected because source memory is not aligned 0x%x\n",
1186 				hint_addr, phys_pg_pack->offset);
1187 			rc = -EINVAL;
1188 			goto va_block_err;
1189 		}
1190 		dev_dbg(hdev->dev,
1191 			"Hint address 0x%llx will be ignored because source memory is not aligned 0x%x\n",
1192 			hint_addr, phys_pg_pack->offset);
1193 	}
1194 
1195 	ret_vaddr = get_va_block(hdev, va_range, phys_pg_pack->total_size,
1196 					hint_addr, va_block_align,
1197 					va_range_type, args->flags);
1198 	if (!ret_vaddr) {
1199 		dev_err(hdev->dev, "no available va block for handle %u\n",
1200 				handle);
1201 		rc = -ENOMEM;
1202 		goto va_block_err;
1203 	}
1204 
1205 	mutex_lock(&hdev->mmu_lock);
1206 
1207 	rc = map_phys_pg_pack(ctx, ret_vaddr, phys_pg_pack);
1208 	if (rc) {
1209 		dev_err(hdev->dev, "mapping page pack failed for handle %u\n", handle);
1210 		mutex_unlock(&hdev->mmu_lock);
1211 		goto map_err;
1212 	}
1213 
1214 	rc = hl_mmu_invalidate_cache_range(hdev, false, *vm_type | MMU_OP_SKIP_LOW_CACHE_INV,
1215 				ctx->asid, ret_vaddr, phys_pg_pack->total_size);
1216 	mutex_unlock(&hdev->mmu_lock);
1217 	if (rc)
1218 		goto map_err;
1219 
1220 	/*
1221 	 * prefetch is done upon the user's request. It is performed in a WQ and so can
1222 	 * be outside the MMU lock. The operation itself is already protected by the MMU lock.
1223 	 */
1224 	if (do_prefetch) {
1225 		rc = hl_mmu_prefetch_cache_range(ctx, *vm_type, ctx->asid, ret_vaddr,
1226 							phys_pg_pack->total_size);
1227 		if (rc)
1228 			goto map_err;
1229 	}
1230 
1231 	ret_vaddr += phys_pg_pack->offset;
1232 
1233 	hnode->ptr = vm_type;
1234 	hnode->vaddr = ret_vaddr;
1235 	hnode->handle = is_userptr ? MEM_HANDLE_INVALID : handle;
1236 
1237 	mutex_lock(&ctx->mem_hash_lock);
1238 	hash_add(ctx->mem_hash, &hnode->node, ret_vaddr);
1239 	mutex_unlock(&ctx->mem_hash_lock);
1240 
1241 	*device_addr = ret_vaddr;
1242 
1243 	if (is_userptr)
1244 		free_phys_pg_pack(hdev, phys_pg_pack);
1245 
1246 	return rc;
1247 
1248 map_err:
1249 	if (add_va_block(hdev, va_range, ret_vaddr,
1250 				ret_vaddr + phys_pg_pack->total_size - 1))
1251 		dev_warn(hdev->dev,
1252 			"release va block failed for handle 0x%x, vaddr: 0x%llx\n",
1253 				handle, ret_vaddr);
1254 
1255 va_block_err:
1256 	kfree(hnode);
1257 hnode_err:
1258 shared_err:
1259 	atomic_dec(&phys_pg_pack->mapping_cnt);
1260 	if (is_userptr)
1261 		free_phys_pg_pack(hdev, phys_pg_pack);
1262 init_page_pack_err:
1263 	if (is_userptr)
1264 		dma_unmap_host_va(hdev, userptr);
1265 
1266 	return rc;
1267 }
1268 
1269 /* Should be called while the context's mem_hash_lock is taken */
1270 static struct hl_vm_hash_node *get_vm_hash_node_locked(struct hl_ctx *ctx, u64 vaddr)
1271 {
1272 	struct hl_vm_hash_node *hnode;
1273 
1274 	hash_for_each_possible(ctx->mem_hash, hnode, node, vaddr)
1275 		if (vaddr == hnode->vaddr)
1276 			return hnode;
1277 
1278 	return NULL;
1279 }
1280 
1281 /**
1282  * unmap_device_va() - unmap the given device virtual address.
1283  * @ctx: pointer to the context structure.
1284  * @args: host parameters with device virtual address to unmap.
1285  * @ctx_free: true if in context free flow, false otherwise.
1286  *
1287  * This function does the following:
1288  * - unmap the physical pages related to the given virtual address.
1289  * - return the device virtual block to the virtual block list.
1290  */
1291 static int unmap_device_va(struct hl_ctx *ctx, struct hl_mem_in *args,
1292 				bool ctx_free)
1293 {
1294 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
1295 	u64 vaddr = args->unmap.device_virt_addr;
1296 	struct asic_fixed_properties *prop;
1297 	struct hl_device *hdev = ctx->hdev;
1298 	struct hl_userptr *userptr = NULL;
1299 	struct hl_vm_hash_node *hnode;
1300 	struct hl_va_range *va_range;
1301 	enum vm_type *vm_type;
1302 	bool is_userptr;
1303 	int rc = 0;
1304 
1305 	prop = &hdev->asic_prop;
1306 
1307 	/* protect from double entrance */
1308 	mutex_lock(&ctx->mem_hash_lock);
1309 	hnode = get_vm_hash_node_locked(ctx, vaddr);
1310 	if (!hnode) {
1311 		mutex_unlock(&ctx->mem_hash_lock);
1312 		dev_err(hdev->dev, "unmap failed, no mem hnode for vaddr 0x%llx\n", vaddr);
1313 		return -EINVAL;
1314 	}
1315 
1316 	if (hnode->export_cnt) {
1317 		mutex_unlock(&ctx->mem_hash_lock);
1318 		dev_err(hdev->dev, "failed to unmap %#llx, memory is exported\n", vaddr);
1319 		return -EINVAL;
1320 	}
1321 
1322 	hash_del(&hnode->node);
1323 	mutex_unlock(&ctx->mem_hash_lock);
1324 
1325 	vm_type = hnode->ptr;
1326 
1327 	if (*vm_type == VM_TYPE_USERPTR) {
1328 		is_userptr = true;
1329 		userptr = hnode->ptr;
1330 
1331 		rc = init_phys_pg_pack_from_userptr(ctx, userptr, &phys_pg_pack,
1332 							false);
1333 		if (rc) {
1334 			dev_err(hdev->dev,
1335 				"unable to init page pack for vaddr 0x%llx\n",
1336 				vaddr);
1337 			goto vm_type_err;
1338 		}
1339 
1340 		if (phys_pg_pack->page_size ==
1341 					hdev->asic_prop.pmmu.page_size)
1342 			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST];
1343 		else
1344 			va_range = ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE];
1345 	} else if (*vm_type == VM_TYPE_PHYS_PACK) {
1346 		is_userptr = false;
1347 		va_range = ctx->va_range[HL_VA_RANGE_TYPE_DRAM];
1348 		phys_pg_pack = hnode->ptr;
1349 	} else {
1350 		dev_warn(hdev->dev,
1351 			"unmap failed, unknown vm desc for vaddr 0x%llx\n",
1352 				vaddr);
1353 		rc = -EFAULT;
1354 		goto vm_type_err;
1355 	}
1356 
1357 	if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) {
1358 		dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr);
1359 		rc = -EINVAL;
1360 		goto mapping_cnt_err;
1361 	}
1362 
1363 	if (!is_userptr && !is_power_of_2(phys_pg_pack->page_size))
1364 		vaddr = prop->dram_base_address +
1365 			DIV_ROUND_DOWN_ULL(vaddr - prop->dram_base_address,
1366 						phys_pg_pack->page_size) *
1367 							phys_pg_pack->page_size;
1368 	else
1369 		vaddr &= ~(((u64) phys_pg_pack->page_size) - 1);
1370 
1371 	mutex_lock(&hdev->mmu_lock);
1372 
1373 	unmap_phys_pg_pack(ctx, vaddr, phys_pg_pack);
1374 
1375 	/*
1376 	 * During context free this function is called in a loop to clean all
1377 	 * the context mappings. Hence the cache invalidation can be called once
1378 	 * at the loop end rather than for each iteration
1379 	 */
1380 	if (!ctx_free)
1381 		rc = hl_mmu_invalidate_cache_range(hdev, true, *vm_type, ctx->asid, vaddr,
1382 							phys_pg_pack->total_size);
1383 
1384 	mutex_unlock(&hdev->mmu_lock);
1385 
1386 	/*
1387 	 * If the context is closing we don't need to check for the MMU cache
1388 	 * invalidation return code and update the VA free list as in this flow
1389 	 * we invalidate the MMU cache outside of this unmap function and the VA
1390 	 * free list will be freed anyway.
1391 	 */
1392 	if (!ctx_free) {
1393 		int tmp_rc;
1394 
1395 		tmp_rc = add_va_block(hdev, va_range, vaddr,
1396 					vaddr + phys_pg_pack->total_size - 1);
1397 		if (tmp_rc) {
1398 			dev_warn(hdev->dev,
1399 					"add va block failed for vaddr: 0x%llx\n",
1400 					vaddr);
1401 			if (!rc)
1402 				rc = tmp_rc;
1403 		}
1404 	}
1405 
1406 	atomic_dec(&phys_pg_pack->mapping_cnt);
1407 	kfree(hnode);
1408 
1409 	if (is_userptr) {
1410 		free_phys_pg_pack(hdev, phys_pg_pack);
1411 		dma_unmap_host_va(hdev, userptr);
1412 	}
1413 
1414 	return rc;
1415 
1416 mapping_cnt_err:
1417 	if (is_userptr)
1418 		free_phys_pg_pack(hdev, phys_pg_pack);
1419 vm_type_err:
1420 	mutex_lock(&ctx->mem_hash_lock);
1421 	hash_add(ctx->mem_hash, &hnode->node, vaddr);
1422 	mutex_unlock(&ctx->mem_hash_lock);
1423 
1424 	return rc;
1425 }
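
/*
 * Compile-only usage sketch; the function below is purely an example and not
 * part of the driver flow (the real callers are the memory IOCTL and the
 * context teardown). It shows how a device memory handle is mapped to a
 * device VA and later unmapped with that VA.
 */
static int __maybe_unused example_map_then_unmap(struct hl_ctx *ctx, u64 alloc_handle)
{
	struct hl_mem_in args = {};
	u64 device_va;
	int rc;

	args.map_device.handle = alloc_handle;
	args.map_device.hint_addr = 0;	/* let the driver pick the device VA */

	rc = map_device_va(ctx, &args, &device_va);
	if (rc)
		return rc;

	args.unmap.device_virt_addr = device_va;
	return unmap_device_va(ctx, &args, false);
}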
1426 
1427 static int map_block(struct hl_device *hdev, u64 address, u64 *handle, u32 *size)
1428 {
1429 	u32 block_id;
1430 	int rc;
1431 
1432 	*handle = 0;
1433 	if (size)
1434 		*size = 0;
1435 
1436 	rc = hdev->asic_funcs->get_hw_block_id(hdev, address, size, &block_id);
1437 	if (rc)
1438 		return rc;
1439 
1440 	*handle = block_id | HL_MMAP_TYPE_BLOCK;
1441 	*handle <<= PAGE_SHIFT;
1442 
1443 	return 0;
1444 }
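
/*
 * Illustrative note: the returned handle is the HW block id tagged with
 * HL_MMAP_TYPE_BLOCK and shifted left by PAGE_SHIFT, so user space can pass
 * it directly as the mmap() offset; the block id is then recovered from
 * vma->vm_pgoff in hl_hw_block_mmap() below.
 */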
1445 
1446 static void hw_block_vm_close(struct vm_area_struct *vma)
1447 {
1448 	struct hl_vm_hw_block_list_node *lnode =
1449 		(struct hl_vm_hw_block_list_node *) vma->vm_private_data;
1450 	struct hl_ctx *ctx = lnode->ctx;
1451 	long new_mmap_size;
1452 
1453 	new_mmap_size = lnode->mapped_size - (vma->vm_end - vma->vm_start);
1454 	if (new_mmap_size > 0) {
1455 		lnode->mapped_size = new_mmap_size;
1456 		return;
1457 	}
1458 
1459 	mutex_lock(&ctx->hw_block_list_lock);
1460 	list_del(&lnode->node);
1461 	mutex_unlock(&ctx->hw_block_list_lock);
1462 	hl_ctx_put(ctx);
1463 	kfree(lnode);
1464 	vma->vm_private_data = NULL;
1465 }
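
/*
 * Note: a partial munmap() only decreases mapped_size above; the list node is
 * removed and the context reference is dropped only once the whole mapping
 * has been unmapped.
 */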
1466 
1467 static const struct vm_operations_struct hw_block_vm_ops = {
1468 	.close = hw_block_vm_close
1469 };
1470 
1471 /**
1472  * hl_hw_block_mmap() - mmap a hw block to user.
1473  * @hpriv: pointer to the private data of the fd
1474  * @vma: pointer to vm_area_struct of the process
1475  *
1476  * Driver increments context reference for every HW block mapped in order
1477  * to prevent user from closing FD without unmapping first
1478  */
1479 int hl_hw_block_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma)
1480 {
1481 	struct hl_vm_hw_block_list_node *lnode;
1482 	struct hl_device *hdev = hpriv->hdev;
1483 	struct hl_ctx *ctx = hpriv->ctx;
1484 	u32 block_id, block_size;
1485 	int rc;
1486 
1487 	/* We use the page offset to hold the block id and thus we need to clear
1488 	 * it before doing the mmap itself
1489 	 */
1490 	block_id = vma->vm_pgoff;
1491 	vma->vm_pgoff = 0;
1492 
1493 	/* Driver only allows mapping of a complete HW block */
1494 	block_size = vma->vm_end - vma->vm_start;
1495 
1496 	if (!access_ok((void __user *) (uintptr_t) vma->vm_start, block_size)) {
1497 		dev_err(hdev->dev,
1498 			"user pointer is invalid - 0x%lx\n",
1499 			vma->vm_start);
1500 
1501 		return -EINVAL;
1502 	}
1503 
1504 	lnode = kzalloc(sizeof(*lnode), GFP_KERNEL);
1505 	if (!lnode)
1506 		return -ENOMEM;
1507 
1508 	rc = hdev->asic_funcs->hw_block_mmap(hdev, vma, block_id, block_size);
1509 	if (rc) {
1510 		kfree(lnode);
1511 		return rc;
1512 	}
1513 
1514 	hl_ctx_get(ctx);
1515 
1516 	lnode->ctx = ctx;
1517 	lnode->vaddr = vma->vm_start;
1518 	lnode->block_size = block_size;
1519 	lnode->mapped_size = lnode->block_size;
1520 	lnode->id = block_id;
1521 
1522 	vma->vm_private_data = lnode;
1523 	vma->vm_ops = &hw_block_vm_ops;
1524 
1525 	mutex_lock(&ctx->hw_block_list_lock);
1526 	list_add_tail(&lnode->node, &ctx->hw_block_mem_list);
1527 	mutex_unlock(&ctx->hw_block_list_lock);
1528 
1529 	vma->vm_pgoff = block_id;
1530 
1531 	return 0;
1532 }
1533 
1534 static int set_dma_sg(struct scatterlist *sg, u64 bar_address, u64 chunk_size,
1535 			struct device *dev, enum dma_data_direction dir)
1536 {
1537 	dma_addr_t addr;
1538 	int rc;
1539 
1540 	addr = dma_map_resource(dev, bar_address, chunk_size, dir,
1541 				DMA_ATTR_SKIP_CPU_SYNC);
1542 	rc = dma_mapping_error(dev, addr);
1543 	if (rc)
1544 		return rc;
1545 
1546 	sg_set_page(sg, NULL, chunk_size, 0);
1547 	sg_dma_address(sg) = addr;
1548 	sg_dma_len(sg) = chunk_size;
1549 
1550 	return 0;
1551 }
1552 
1553 static struct sg_table *alloc_sgt_from_device_pages(struct hl_device *hdev, u64 *pages, u64 npages,
1554 						u64 page_size, u64 exported_size,
1555 						struct device *dev, enum dma_data_direction dir)
1556 {
1557 	u64 chunk_size, bar_address, dma_max_seg_size, cur_size_to_export, cur_npages;
1558 	struct asic_fixed_properties *prop;
1559 	int rc, i, j, nents, cur_page;
1560 	struct scatterlist *sg;
1561 	struct sg_table *sgt;
1562 
1563 	prop = &hdev->asic_prop;
1564 
1565 	dma_max_seg_size = dma_get_max_seg_size(dev);
1566 
1567 	/* We would like to align the max segment size to PAGE_SIZE, so the
1568 	 * SGL will contain aligned addresses that can be easily mapped to
1569 	 * an MMU
1570 	 */
1571 	dma_max_seg_size = ALIGN_DOWN(dma_max_seg_size, PAGE_SIZE);
1572 	if (dma_max_seg_size < PAGE_SIZE) {
1573 		dev_err_ratelimited(hdev->dev,
1574 				"dma_max_seg_size %llu can't be smaller than PAGE_SIZE\n",
1575 				dma_max_seg_size);
1576 		return ERR_PTR(-EINVAL);
1577 	}
1578 
1579 	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
1580 	if (!sgt)
1581 		return ERR_PTR(-ENOMEM);
1582 
1583 	/* remove export size restrictions in case not explicitly defined */
1584 	cur_size_to_export = exported_size ? exported_size : (npages * page_size);
1585 
1586 	/* If the size of each page is larger than the dma max segment size,
1587 	 * then we can't combine pages and the number of entries in the SGL
1588 	 * will just be the
1589 	 * <number of pages> * <chunks of max segment size in each page>
1590 	 */
1591 	if (page_size > dma_max_seg_size) {
1592 		/* we should limit number of pages according to the exported size */
1593 		cur_npages = DIV_ROUND_UP_SECTOR_T(cur_size_to_export, page_size);
1594 		nents = cur_npages * DIV_ROUND_UP_SECTOR_T(page_size, dma_max_seg_size);
1595 	} else {
1596 		cur_npages = npages;
1597 
1598 		/* Get number of non-contiguous chunks */
1599 		for (i = 1, nents = 1, chunk_size = page_size ; i < cur_npages ; i++) {
1600 			if (pages[i - 1] + page_size != pages[i] ||
1601 					chunk_size + page_size > dma_max_seg_size) {
1602 				nents++;
1603 				chunk_size = page_size;
1604 				continue;
1605 			}
1606 
1607 			chunk_size += page_size;
1608 		}
1609 	}
1610 
1611 	rc = sg_alloc_table(sgt, nents, GFP_KERNEL | __GFP_ZERO);
1612 	if (rc)
1613 		goto error_free;
1614 
1615 	cur_page = 0;
1616 
1617 	if (page_size > dma_max_seg_size) {
1618 		u64 size_left, cur_device_address = 0;
1619 
1620 		size_left = page_size;
1621 
1622 		/* Need to split each page into the number of chunks of
1623 		 * dma_max_seg_size
1624 		 */
1625 		for_each_sgtable_dma_sg(sgt, sg, i) {
1626 			if (size_left == page_size)
1627 				cur_device_address =
1628 					pages[cur_page] - prop->dram_base_address;
1629 			else
1630 				cur_device_address += dma_max_seg_size;
1631 
1632 			/* make sure not to export over exported size */
1633 			chunk_size = min3(size_left, dma_max_seg_size, cur_size_to_export);
1634 
1635 			bar_address = hdev->dram_pci_bar_start + cur_device_address;
1636 
1637 			rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1638 			if (rc)
1639 				goto error_unmap;
1640 
1641 			cur_size_to_export -= chunk_size;
1642 
1643 			if (size_left > dma_max_seg_size) {
1644 				size_left -= dma_max_seg_size;
1645 			} else {
1646 				cur_page++;
1647 				size_left = page_size;
1648 			}
1649 		}
1650 	} else {
1651 		/* Merge pages and put them into the scatterlist */
1652 		for_each_sgtable_dma_sg(sgt, sg, i) {
1653 			chunk_size = page_size;
1654 			for (j = cur_page + 1 ; j < cur_npages ; j++) {
1655 				if (pages[j - 1] + page_size != pages[j] ||
1656 						chunk_size + page_size > dma_max_seg_size)
1657 					break;
1658 
1659 				chunk_size += page_size;
1660 			}
1661 
1662 			bar_address = hdev->dram_pci_bar_start +
1663 					(pages[cur_page] - prop->dram_base_address);
1664 
1665 			/* make sure not to export over exported size */
1666 			chunk_size = min(chunk_size, cur_size_to_export);
1667 			rc = set_dma_sg(sg, bar_address, chunk_size, dev, dir);
1668 			if (rc)
1669 				goto error_unmap;
1670 
1671 			cur_size_to_export -= chunk_size;
1672 			cur_page = j;
1673 		}
1674 	}
1675 
1676 	/* Because we are not going to include a CPU list, set orig_nents to 0
1677 	 * so that other users have some chance to detect this and will then
1678 	 * use only nents (the length of the DMA list) when going over the
1679 	 * sgl.
1680 	 */
1681 	sgt->orig_nents = 0;
1682 
1683 	return sgt;
1684 
1685 error_unmap:
1686 	for_each_sgtable_dma_sg(sgt, sg, i) {
1687 		if (!sg_dma_len(sg))
1688 			continue;
1689 
1690 		dma_unmap_resource(dev, sg_dma_address(sg),
1691 					sg_dma_len(sg), dir,
1692 					DMA_ATTR_SKIP_CPU_SYNC);
1693 	}
1694 
1695 	sg_free_table(sgt);
1696 
1697 error_free:
1698 	kfree(sgt);
1699 	return ERR_PTR(rc);
1700 }
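
/*
 * Illustrative example, assuming a dma_max_seg_size of 4MB: a single 32MB
 * device page is split into eight 4MB SGL entries, while eight physically
 * contiguous 1MB pages are merged into two 4MB entries, since a merged chunk
 * is never allowed to exceed dma_max_seg_size.
 */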
1701 
1702 static int hl_dmabuf_attach(struct dma_buf *dmabuf,
1703 				struct dma_buf_attachment *attachment)
1704 {
1705 	struct hl_dmabuf_priv *hl_dmabuf;
1706 	struct hl_device *hdev;
1707 	int rc;
1708 
1709 	hl_dmabuf = dmabuf->priv;
1710 	hdev = hl_dmabuf->ctx->hdev;
1711 
1712 	rc = pci_p2pdma_distance(hdev->pdev, attachment->dev, true);
1713 
1714 	if (rc < 0)
1715 		attachment->peer2peer = false;
1716 	return 0;
1717 }
1718 
1719 static struct sg_table *hl_map_dmabuf(struct dma_buf_attachment *attachment,
1720 					enum dma_data_direction dir)
1721 {
1722 	struct dma_buf *dma_buf = attachment->dmabuf;
1723 	struct hl_vm_phys_pg_pack *phys_pg_pack;
1724 	struct hl_dmabuf_priv *hl_dmabuf;
1725 	struct hl_device *hdev;
1726 	struct sg_table *sgt;
1727 
1728 	hl_dmabuf = dma_buf->priv;
1729 	hdev = hl_dmabuf->ctx->hdev;
1730 	phys_pg_pack = hl_dmabuf->phys_pg_pack;
1731 
1732 	if (!attachment->peer2peer) {
1733 		dev_dbg(hdev->dev, "Failed to map dmabuf because p2p is disabled\n");
1734 		return ERR_PTR(-EPERM);
1735 	}
1736 
1737 	if (phys_pg_pack)
1738 		sgt = alloc_sgt_from_device_pages(hdev,
1739 						phys_pg_pack->pages,
1740 						phys_pg_pack->npages,
1741 						phys_pg_pack->page_size,
1742 						phys_pg_pack->exported_size,
1743 						attachment->dev,
1744 						dir);
1745 	else
1746 		sgt = alloc_sgt_from_device_pages(hdev,
1747 						&hl_dmabuf->device_address,
1748 						1,
1749 						hl_dmabuf->dmabuf->size,
1750 						0,
1751 						attachment->dev,
1752 						dir);
1753 
1754 	if (IS_ERR(sgt))
1755 		dev_err(hdev->dev, "failed (%ld) to initialize sgt for dmabuf\n", PTR_ERR(sgt));
1756 
1757 	return sgt;
1758 }
1759 
1760 static void hl_unmap_dmabuf(struct dma_buf_attachment *attachment,
1761 				  struct sg_table *sgt,
1762 				  enum dma_data_direction dir)
1763 {
1764 	struct scatterlist *sg;
1765 	int i;
1766 
1767 	/* The memory behind the dma-buf has *always* resided on the device itself, i.e. it lives
1768 	 * only in the 'device' domain (after all, it maps a PCI bar address which points to the
1769 	 * device memory).
1770 	 *
1771 	 * Therefore, it was never in the 'CPU' domain and hence, there is no need to perform
1772 	 * a sync of the memory to the CPU's cache, as it never resided inside that cache.
1773 	 */
1774 	for_each_sgtable_dma_sg(sgt, sg, i)
1775 		dma_unmap_resource(attachment->dev, sg_dma_address(sg),
1776 					sg_dma_len(sg), dir,
1777 					DMA_ATTR_SKIP_CPU_SYNC);
1778 
1779 	/* Need to restore orig_nents because sg_free_table() uses that field */
1780 	sgt->orig_nents = sgt->nents;
1781 	sg_free_table(sgt);
1782 	kfree(sgt);
1783 }
1784 
1785 static struct hl_vm_hash_node *memhash_node_export_get(struct hl_ctx *ctx, u64 addr)
1786 {
1787 	struct hl_device *hdev = ctx->hdev;
1788 	struct hl_vm_hash_node *hnode;
1789 
1790 	/* get the memory handle */
1791 	mutex_lock(&ctx->mem_hash_lock);
1792 	hnode = get_vm_hash_node_locked(ctx, addr);
1793 	if (!hnode) {
1794 		mutex_unlock(&ctx->mem_hash_lock);
1795 		dev_dbg(hdev->dev, "map address %#llx not found\n", addr);
1796 		return ERR_PTR(-EINVAL);
1797 	}
1798 
1799 	if (upper_32_bits(hnode->handle)) {
1800 		mutex_unlock(&ctx->mem_hash_lock);
1801 		dev_dbg(hdev->dev, "invalid handle %#llx for map address %#llx\n",
1802 				hnode->handle, addr);
1803 		return ERR_PTR(-EINVAL);
1804 	}
1805 
1806 	/*
1807 	 * node found, increase export count so this memory cannot be unmapped
1808 	 * and the hash node cannot be deleted.
1809 	 */
1810 	hnode->export_cnt++;
1811 	mutex_unlock(&ctx->mem_hash_lock);
1812 
1813 	return hnode;
1814 }
1815 
1816 static void memhash_node_export_put(struct hl_ctx *ctx, struct hl_vm_hash_node *hnode)
1817 {
1818 	mutex_lock(&ctx->mem_hash_lock);
1819 	hnode->export_cnt--;
1820 	mutex_unlock(&ctx->mem_hash_lock);
1821 }
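
/*
 * memhash_node_export_get() and memhash_node_export_put() must stay balanced:
 * export_dmabuf_from_addr() takes the reference (and drops it on its error
 * paths), while the matching put for a successful export is done from
 * hl_release_dmabuf() when the dma-buf is released. As long as export_cnt is
 * non-zero, the mapping cannot be unmapped and the hash node is kept alive.
 */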
1822 
1823 static void hl_release_dmabuf(struct dma_buf *dmabuf)
1824 {
1825 	struct hl_dmabuf_priv *hl_dmabuf = dmabuf->priv;
1826 	struct hl_ctx *ctx;
1827 
1828 	if (!hl_dmabuf)
1829 		return;
1830 
1831 	ctx = hl_dmabuf->ctx;
1832 
1833 	if (hl_dmabuf->memhash_hnode)
1834 		memhash_node_export_put(ctx, hl_dmabuf->memhash_hnode);
1835 
1836 	atomic_dec(&ctx->hdev->dmabuf_export_cnt);
1837 	hl_ctx_put(ctx);
1838 
1839 	/* Paired with get_file() in export_dmabuf() */
1840 	fput(ctx->hpriv->filp);
1841 
1842 	kfree(hl_dmabuf);
1843 }
1844 
1845 static const struct dma_buf_ops habanalabs_dmabuf_ops = {
1846 	.attach = hl_dmabuf_attach,
1847 	.map_dma_buf = hl_map_dmabuf,
1848 	.unmap_dma_buf = hl_unmap_dmabuf,
1849 	.release = hl_release_dmabuf,
1850 };
1851 
1852 static int export_dmabuf(struct hl_ctx *ctx,
1853 				struct hl_dmabuf_priv *hl_dmabuf,
1854 				u64 total_size, int flags, int *dmabuf_fd)
1855 {
1856 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
1857 	struct hl_device *hdev = ctx->hdev;
1858 	int rc, fd;
1859 
1860 	exp_info.ops = &habanalabs_dmabuf_ops;
1861 	exp_info.size = total_size;
1862 	exp_info.flags = flags;
1863 	exp_info.priv = hl_dmabuf;
1864 
1865 	hl_dmabuf->dmabuf = dma_buf_export(&exp_info);
1866 	if (IS_ERR(hl_dmabuf->dmabuf)) {
1867 		dev_err(hdev->dev, "failed to export dma-buf\n");
1868 		return PTR_ERR(hl_dmabuf->dmabuf);
1869 	}
1870 
1871 	fd = dma_buf_fd(hl_dmabuf->dmabuf, flags);
1872 	if (fd < 0) {
1873 		dev_err(hdev->dev, "failed to get a file descriptor for a dma-buf, %d\n", fd);
1874 		rc = fd;
1875 		goto err_dma_buf_put;
1876 	}
1877 
1878 	hl_dmabuf->ctx = ctx;
1879 	hl_ctx_get(hl_dmabuf->ctx);
1880 	atomic_inc(&ctx->hdev->dmabuf_export_cnt);
1881 
1882 	/* Get the compute device file to enforce release order, such that all exported dma-buf
1883 	 * objects are released first and only then the compute device.
1884 	 * Paired with fput() in hl_release_dmabuf().
1885 	 */
1886 	get_file(ctx->hpriv->filp);
1887 
1888 	*dmabuf_fd = fd;
1889 
1890 	return 0;
1891 
1892 err_dma_buf_put:
1893 	hl_dmabuf->dmabuf->priv = NULL;
1894 	dma_buf_put(hl_dmabuf->dmabuf);
1895 	return rc;
1896 }
1897 
1898 static int validate_export_params_common(struct hl_device *hdev, u64 device_addr, u64 size)
1899 {
1900 	if (!IS_ALIGNED(device_addr, PAGE_SIZE)) {
1901 		dev_dbg(hdev->dev,
1902 			"exported device memory address 0x%llx should be aligned to 0x%lx\n",
1903 			device_addr, PAGE_SIZE);
1904 		return -EINVAL;
1905 	}
1906 
1907 	if (size < PAGE_SIZE) {
1908 		dev_dbg(hdev->dev,
1909 			"exported device memory size %llu should be equal to or greater than %lu\n",
1910 			size, PAGE_SIZE);
1911 		return -EINVAL;
1912 	}
1913 
1914 	return 0;
1915 }
1916 
1917 static int validate_export_params_no_mmu(struct hl_device *hdev, u64 device_addr, u64 size)
1918 {
1919 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1920 	u64 bar_address;
1921 	int rc;
1922 
1923 	rc = validate_export_params_common(hdev, device_addr, size);
1924 	if (rc)
1925 		return rc;
1926 
1927 	if (device_addr < prop->dram_user_base_address ||
1928 				(device_addr + size) > prop->dram_end_address ||
1929 				(device_addr + size) < device_addr) {
1930 		dev_dbg(hdev->dev,
1931 			"DRAM memory range 0x%llx (+0x%llx) is outside of DRAM boundaries\n",
1932 			device_addr, size);
1933 		return -EINVAL;
1934 	}
1935 
1936 	bar_address = hdev->dram_pci_bar_start + (device_addr - prop->dram_base_address);
1937 
1938 	if ((bar_address + size) > (hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1939 			(bar_address + size) < bar_address) {
1940 		dev_dbg(hdev->dev,
1941 			"DRAM memory range 0x%llx (+0x%llx) is outside of PCI BAR boundaries\n",
1942 			device_addr, size);
1943 		return -EINVAL;
1944 	}
1945 
1946 	return 0;
1947 }
1948 
1949 static int validate_export_params(struct hl_device *hdev, u64 device_addr, u64 size, u64 offset,
1950 					struct hl_vm_phys_pg_pack *phys_pg_pack)
1951 {
1952 	struct asic_fixed_properties *prop = &hdev->asic_prop;
1953 	u64 bar_address;
1954 	int i, rc;
1955 
1956 	rc = validate_export_params_common(hdev, device_addr, size);
1957 	if (rc)
1958 		return rc;
1959 
1960 	if ((offset + size) > phys_pg_pack->total_size) {
1961 		dev_dbg(hdev->dev, "offset %#llx and size %#llx exceed total map size %#llx\n",
1962 				offset, size, phys_pg_pack->total_size);
1963 		return -EINVAL;
1964 	}
1965 
1966 	for (i = 0 ; i < phys_pg_pack->npages ; i++) {
1967 
1968 		bar_address = hdev->dram_pci_bar_start +
1969 					(phys_pg_pack->pages[i] - prop->dram_base_address);
1970 
1971 		if ((bar_address + phys_pg_pack->page_size) >
1972 				(hdev->dram_pci_bar_start + prop->dram_pci_bar_size) ||
1973 				(bar_address + phys_pg_pack->page_size) < bar_address) {
1974 			dev_dbg(hdev->dev,
1975 				"DRAM memory range 0x%llx (+0x%x) is outside of PCI BAR boundaries\n",
1976 					phys_pg_pack->pages[i],
1977 					phys_pg_pack->page_size);
1978 
1979 			return -EINVAL;
1980 		}
1981 	}
1982 
1983 	return 0;
1984 }
1985 
1986 static struct hl_vm_phys_pg_pack *get_phys_pg_pack_from_hash_node(struct hl_device *hdev,
1987 							struct hl_vm_hash_node *hnode)
1988 {
1989 	struct hl_vm_phys_pg_pack *phys_pg_pack;
1990 	struct hl_vm *vm = &hdev->vm;
1991 
1992 	spin_lock(&vm->idr_lock);
1993 	phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, (u32) hnode->handle);
1994 	if (!phys_pg_pack) {
1995 		spin_unlock(&vm->idr_lock);
1996 		dev_dbg(hdev->dev, "no match for handle 0x%x\n", (u32) hnode->handle);
1997 		return ERR_PTR(-EINVAL);
1998 	}
1999 
2000 	spin_unlock(&vm->idr_lock);
2001 
2002 	if (phys_pg_pack->vm_type != VM_TYPE_PHYS_PACK) {
2003 		dev_dbg(hdev->dev, "handle 0x%llx does not represent DRAM memory\n", hnode->handle);
2004 		return ERR_PTR(-EINVAL);
2005 	}
2006 
2007 	return phys_pg_pack;
2008 }
2009 
2010 /**
2011  * export_dmabuf_from_addr() - export a dma-buf object for the given memory
2012  *                             address and size.
2013  * @ctx: pointer to the context structure.
2014  * @addr: device address.
2015  * @size: size of device memory to export.
2016  * @offset: the offset into the buffer from which to start exporting.
2017  * @flags: DMA-BUF file/FD flags.
2018  * @dmabuf_fd: pointer to result FD that represents the dma-buf object.
2019  *
2020  * Create and export a dma-buf object for an existing memory allocation inside
2021  * the device memory, and return a FD which is associated with the dma-buf
2022  * object.
2023  *
2024  * Return: 0 on success, non-zero for failure.
2025  */
2026 static int export_dmabuf_from_addr(struct hl_ctx *ctx, u64 addr, u64 size, u64 offset,
2027 					int flags, int *dmabuf_fd)
2028 {
2029 	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
2030 	struct hl_vm_hash_node *hnode = NULL;
2031 	struct asic_fixed_properties *prop;
2032 	struct hl_dmabuf_priv *hl_dmabuf;
2033 	struct hl_device *hdev;
2034 	u64 export_addr;
2035 	int rc;
2036 
2037 	hdev = ctx->hdev;
2038 	prop = &hdev->asic_prop;
2039 
2040 	/* offset must be 0 in devices without virtual memory support */
2041 	if (!prop->dram_supports_virtual_memory && offset) {
2042 		dev_dbg(hdev->dev, "offset is not allowed in device without virtual memory\n");
2043 		return -EINVAL;
2044 	}
2045 
2046 	export_addr = addr + offset;
2047 
2048 	hl_dmabuf = kzalloc(sizeof(*hl_dmabuf), GFP_KERNEL);
2049 	if (!hl_dmabuf)
2050 		return -ENOMEM;
2051 
2052 	if (prop->dram_supports_virtual_memory) {
2053 		hnode = memhash_node_export_get(ctx, addr);
2054 		if (IS_ERR(hnode)) {
2055 			rc = PTR_ERR(hnode);
2056 			goto err_free_dmabuf_wrapper;
2057 		}
2058 		phys_pg_pack = get_phys_pg_pack_from_hash_node(hdev, hnode);
2059 		if (IS_ERR(phys_pg_pack)) {
2060 			rc = PTR_ERR(phys_pg_pack);
2061 			goto dec_memhash_export_cnt;
2062 		}
2063 		rc = validate_export_params(hdev, export_addr, size, offset, phys_pg_pack);
2064 		if (rc)
2065 			goto dec_memhash_export_cnt;
2066 
2067 		phys_pg_pack->exported_size = size;
2068 		hl_dmabuf->phys_pg_pack = phys_pg_pack;
2069 		hl_dmabuf->memhash_hnode = hnode;
2070 	} else {
2071 		rc = validate_export_params_no_mmu(hdev, export_addr, size);
2072 		if (rc)
2073 			goto err_free_dmabuf_wrapper;
2074 	}
2075 
2076 	hl_dmabuf->device_address = export_addr;
2077 
2078 	rc = export_dmabuf(ctx, hl_dmabuf, size, flags, dmabuf_fd);
2079 	if (rc)
2080 		goto dec_memhash_export_cnt;
2081 
2082 	return 0;
2083 
2084 dec_memhash_export_cnt:
2085 	if (prop->dram_supports_virtual_memory)
2086 		memhash_node_export_put(ctx, hnode);
2087 err_free_dmabuf_wrapper:
2088 	kfree(hl_dmabuf);
2089 	return rc;
2090 }
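
/*
 * Illustrative sketch (not part of the driver), assuming a peer device driver
 * that received the file descriptor returned above: importing goes through the
 * generic dma-buf API, which in turn invokes the habanalabs_dmabuf_ops
 * callbacks defined in this file, e.g.:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, importer_dev);
 *	struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 * with dma_buf_unmap_attachment(), dma_buf_detach() and dma_buf_put() on
 * teardown. Error handling is omitted and importer_dev is a placeholder for
 * the importing struct device.
 */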
2091 
2092 static int mem_ioctl_no_mmu(struct hl_fpriv *hpriv, union hl_mem_args *args)
2093 {
2094 	struct hl_device *hdev = hpriv->hdev;
2095 	u64 block_handle, device_addr = 0;
2096 	struct hl_ctx *ctx = hpriv->ctx;
2097 	u32 handle = 0, block_size;
2098 	int rc;
2099 
2100 	switch (args->in.op) {
2101 	case HL_MEM_OP_ALLOC:
2102 		if (args->in.alloc.mem_size == 0) {
2103 			dev_err(hdev->dev, "alloc size must be larger than 0\n");
2104 			rc = -EINVAL;
2105 			goto out;
2106 		}
2107 
2108 		/* Force contiguous as there are no real MMU
2109 		 * translations to overcome physical memory gaps
2110 		 */
2111 		args->in.flags |= HL_MEM_CONTIGUOUS;
2112 		rc = alloc_device_memory(ctx, &args->in, &handle);
2113 
2114 		memset(args, 0, sizeof(*args));
2115 		args->out.handle = (__u64) handle;
2116 		break;
2117 
2118 	case HL_MEM_OP_FREE:
2119 		rc = free_device_memory(ctx, &args->in);
2120 		break;
2121 
2122 	case HL_MEM_OP_MAP:
2123 		if (args->in.flags & HL_MEM_USERPTR) {
2124 			dev_err(hdev->dev, "Failed to map host memory when MMU is disabled\n");
2125 			rc = -EPERM;
2126 		} else {
2127 			rc = get_paddr_from_handle(ctx, &args->in, &device_addr);
2128 			memset(args, 0, sizeof(*args));
2129 			args->out.device_virt_addr = device_addr;
2130 		}
2131 
2132 		break;
2133 
2134 	case HL_MEM_OP_UNMAP:
2135 		rc = 0;
2136 		break;
2137 
2138 	case HL_MEM_OP_MAP_BLOCK:
2139 		rc = map_block(hdev, args->in.map_block.block_addr, &block_handle, &block_size);
2140 		args->out.block_handle = block_handle;
2141 		args->out.block_size = block_size;
2142 		break;
2143 
2144 	case HL_MEM_OP_EXPORT_DMABUF_FD:
2145 		dev_err(hdev->dev, "Failed to export dma-buf object when MMU is disabled\n");
2146 		rc = -EPERM;
2147 		break;
2148 
2149 	case HL_MEM_OP_TS_ALLOC:
2150 		rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2151 		break;
2152 	default:
2153 		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2154 		rc = -EINVAL;
2155 		break;
2156 	}
2157 
2158 out:
2159 	return rc;
2160 }
2161 
2162 static void ts_buff_release(struct hl_mmap_mem_buf *buf)
2163 {
2164 	struct hl_ts_buff *ts_buff = buf->private;
2165 
2166 	vfree(ts_buff->kernel_buff_address);
2167 	vfree(ts_buff->user_buff_address);
2168 	kfree(ts_buff);
2169 }
2170 
2171 static int hl_ts_mmap(struct hl_mmap_mem_buf *buf, struct vm_area_struct *vma, void *args)
2172 {
2173 	struct hl_ts_buff *ts_buff = buf->private;
2174 
2175 	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE);
2176 	return remap_vmalloc_range(vma, ts_buff->user_buff_address, 0);
2177 }
2178 
2179 static int hl_ts_alloc_buf(struct hl_mmap_mem_buf *buf, gfp_t gfp, void *args)
2180 {
2181 	struct hl_ts_buff *ts_buff = NULL;
2182 	u32 num_elements;
2183 	size_t size;
2184 	void *p;
2185 
2186 	num_elements = *(u32 *)args;
2187 
2188 	ts_buff = kzalloc(sizeof(*ts_buff), gfp);
2189 	if (!ts_buff)
2190 		return -ENOMEM;
2191 
2192 	/* Allocate the user buffer */
2193 	size = num_elements * sizeof(u64);
2194 	p = vmalloc_user(size);
2195 	if (!p)
2196 		goto free_mem;
2197 
2198 	ts_buff->user_buff_address = p;
2199 	buf->mappable_size = size;
2200 
2201 	/* Allocate the internal kernel buffer */
2202 	size = num_elements * sizeof(struct hl_user_pending_interrupt);
2203 	p = vzalloc(size);
2204 	if (!p)
2205 		goto free_user_buff;
2206 
2207 	ts_buff->kernel_buff_address = p;
2208 	ts_buff->kernel_buff_size = size;
2209 
2210 	buf->private = ts_buff;
2211 
2212 	return 0;
2213 
2214 free_user_buff:
2215 	vfree(ts_buff->user_buff_address);
2216 free_mem:
2217 	kfree(ts_buff);
2218 	return -ENOMEM;
2219 }
2220 
2221 static struct hl_mmap_mem_buf_behavior hl_ts_behavior = {
2222 	.topic = "TS",
2223 	.mem_id = HL_MMAP_TYPE_TS_BUFF,
2224 	.mmap = hl_ts_mmap,
2225 	.alloc = hl_ts_alloc_buf,
2226 	.release = ts_buff_release,
2227 };
2228 
2229 /**
2230  * allocate_timestamps_buffers() - allocate timestamps buffers.
2231  * @hpriv: pointer to the private data of the fd.
2232  * @args: ioctl input.
2233  * @handle: user timestamp buffer handle as an output.
2234  *
2235  * This function allocates a timestamp buffer that will later be mapped to the
2236  * user so the user can read the timestamps. In addition, it allocates an extra
2237  * buffer for registration management: because registration must not fail due
2238  * to an out-of-memory situation, a pool of user interrupt nodes is prepared
2239  * here, and nodes are picked from this pool during registration instead of
2240  * being allocated dynamically. It also adds a node to the mapping hash, which
2241  * is used to map the user timestamp buffer to the internal kernel one.
2242  */
2243 static int allocate_timestamps_buffers(struct hl_fpriv *hpriv, struct hl_mem_in *args, u64 *handle)
2244 {
2245 	struct hl_mem_mgr *mmg = &hpriv->mem_mgr;
2246 	struct hl_mmap_mem_buf *buf;
2247 
2248 	if (args->num_of_elements > TS_MAX_ELEMENTS_NUM) {
2249 		dev_err(mmg->dev, "Num of elements exceeds Max allowed number (0x%x > 0x%x)\n",
2250 				args->num_of_elements, TS_MAX_ELEMENTS_NUM);
2251 		return -EINVAL;
2252 	}
2253 
2254 	buf = hl_mmap_mem_buf_alloc(mmg, &hl_ts_behavior, GFP_KERNEL, &args->num_of_elements);
2255 	if (!buf)
2256 		return -ENOMEM;
2257 
2258 	*handle = buf->handle;
2259 
2260 	return 0;
2261 }
2262 
2263 int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data)
2264 {
2265 	enum hl_device_status status;
2266 	union hl_mem_args *args = data;
2267 	struct hl_device *hdev = hpriv->hdev;
2268 	struct hl_ctx *ctx = hpriv->ctx;
2269 	u64 block_handle, device_addr = 0;
2270 	u32 handle = 0, block_size;
2271 	int rc, dmabuf_fd = -EBADF;
2272 
2273 	if (!hl_device_operational(hdev, &status)) {
2274 		dev_dbg_ratelimited(hdev->dev,
2275 			"Device is %s. Can't execute MEMORY IOCTL\n",
2276 			hdev->status[status]);
2277 		return -EBUSY;
2278 	}
2279 
2280 	if (!hdev->mmu_enable)
2281 		return mem_ioctl_no_mmu(hpriv, args);
2282 
2283 	switch (args->in.op) {
2284 	case HL_MEM_OP_ALLOC:
2285 		if (args->in.alloc.mem_size == 0) {
2286 			dev_err(hdev->dev,
2287 				"alloc size must be larger than 0\n");
2288 			rc = -EINVAL;
2289 			goto out;
2290 		}
2291 
2292 		/* If DRAM does not support virtual memory the driver won't
2293 		 * handle the allocation/freeing of that memory. However, for
2294 		 * system administration/monitoring purposes, the driver will
2295 		 * keep track of the amount of DRAM memory that is allocated
2296 		 * and freed by the user. Because this code totally relies on
2297 		 * the user's input, the driver can't ensure the validity
2298 		 * of this accounting.
2299 		 */
2300 		if (!hdev->asic_prop.dram_supports_virtual_memory) {
2301 			atomic64_add(args->in.alloc.mem_size,
2302 					&ctx->dram_phys_mem);
2303 			atomic64_add(args->in.alloc.mem_size,
2304 					&hdev->dram_used_mem);
2305 
2306 			dev_dbg(hdev->dev, "DRAM alloc is not supported\n");
2307 			rc = 0;
2308 
2309 			memset(args, 0, sizeof(*args));
2310 			args->out.handle = 0;
2311 			goto out;
2312 		}
2313 
2314 		rc = alloc_device_memory(ctx, &args->in, &handle);
2315 
2316 		memset(args, 0, sizeof(*args));
2317 		args->out.handle = (__u64) handle;
2318 		break;
2319 
2320 	case HL_MEM_OP_FREE:
2321 		/* If DRAM does not support virtual memory the driver won't
2322 		 * handle the allocation/freeing of that memory. However, for
2323 		 * system administration/monitoring purposes, the driver will
2324 		 * keep track of the amount of DRAM memory that is allocated
2325 		 * and freed by the user. Because this code totally relies on
2326 		 * the user's input, the driver can't ensure the validity
2327 		 * of this accounting.
2328 		 */
2329 		if (!hdev->asic_prop.dram_supports_virtual_memory) {
2330 			atomic64_sub(args->in.alloc.mem_size,
2331 					&ctx->dram_phys_mem);
2332 			atomic64_sub(args->in.alloc.mem_size,
2333 					&hdev->dram_used_mem);
2334 
2335 			dev_dbg(hdev->dev, "DRAM free is not supported\n");
2336 			rc = 0;
2337 
2338 			goto out;
2339 		}
2340 
2341 		rc = free_device_memory(ctx, &args->in);
2342 		break;
2343 
2344 	case HL_MEM_OP_MAP:
2345 		rc = map_device_va(ctx, &args->in, &device_addr);
2346 
2347 		memset(args, 0, sizeof(*args));
2348 		args->out.device_virt_addr = device_addr;
2349 		break;
2350 
2351 	case HL_MEM_OP_UNMAP:
2352 		rc = unmap_device_va(ctx, &args->in, false);
2353 		break;
2354 
2355 	case HL_MEM_OP_MAP_BLOCK:
2356 		rc = map_block(hdev, args->in.map_block.block_addr,
2357 				&block_handle, &block_size);
2358 		args->out.block_handle = block_handle;
2359 		args->out.block_size = block_size;
2360 		break;
2361 
2362 	case HL_MEM_OP_EXPORT_DMABUF_FD:
2363 		rc = export_dmabuf_from_addr(ctx,
2364 				args->in.export_dmabuf_fd.addr,
2365 				args->in.export_dmabuf_fd.mem_size,
2366 				args->in.export_dmabuf_fd.offset,
2367 				args->in.flags,
2368 				&dmabuf_fd);
2369 		memset(args, 0, sizeof(*args));
2370 		args->out.fd = dmabuf_fd;
2371 		break;
2372 
2373 	case HL_MEM_OP_TS_ALLOC:
2374 		rc = allocate_timestamps_buffers(hpriv, &args->in, &args->out.handle);
2375 		break;
2376 	default:
2377 		dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n");
2378 		rc = -EINVAL;
2379 		break;
2380 	}
2381 
2382 out:
2383 	return rc;
2384 }
2385 
2386 static int get_user_memory(struct hl_device *hdev, u64 addr, u64 size,
2387 				u32 npages, u64 start, u32 offset,
2388 				struct hl_userptr *userptr)
2389 {
2390 	int rc;
2391 
2392 	if (!access_ok((void __user *) (uintptr_t) addr, size)) {
2393 		dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr);
2394 		return -EFAULT;
2395 	}
2396 
2397 	userptr->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
2398 	if (!userptr->pages)
2399 		return -ENOMEM;
2400 
2401 	rc = pin_user_pages_fast(start, npages, FOLL_WRITE | FOLL_LONGTERM,
2402 				 userptr->pages);
2403 
2404 	if (rc != npages) {
2405 		dev_err(hdev->dev,
2406 			"Failed (%d) to pin host memory with user ptr 0x%llx, size 0x%llx, npages %d\n",
2407 			rc, addr, size, npages);
2408 		if (rc < 0)
2409 			goto destroy_pages;
2410 		npages = rc;
2411 		rc = -EFAULT;
2412 		goto put_pages;
2413 	}
2414 	userptr->npages = npages;
2415 
2416 	rc = sg_alloc_table_from_pages(userptr->sgt,
2417 				       userptr->pages,
2418 				       npages, offset, size, GFP_KERNEL);
2419 	if (rc < 0) {
2420 		dev_err(hdev->dev, "failed to create SG table from pages\n");
2421 		goto put_pages;
2422 	}
2423 
2424 	return 0;
2425 
2426 put_pages:
2427 	unpin_user_pages(userptr->pages, npages);
2428 destroy_pages:
2429 	kvfree(userptr->pages);
2430 	return rc;
2431 }
2432 
2433 /**
2434  * hl_pin_host_memory() - pins a chunk of host memory.
2435  * @hdev: pointer to the habanalabs device structure.
2436  * @addr: the host virtual address of the memory area.
2437  * @size: the size of the memory area.
2438  * @userptr: pointer to hl_userptr structure.
2439  *
2440  * This function does the following:
2441  * - Pins the physical pages.
2442  * - Creates an SG list from those pages.
2443  */
2444 int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size,
2445 					struct hl_userptr *userptr)
2446 {
2447 	u64 start, end;
2448 	u32 npages, offset;
2449 	int rc;
2450 
2451 	if (!size) {
2452 		dev_err(hdev->dev, "size to pin is invalid - %llu\n", size);
2453 		return -EINVAL;
2454 	}
2455 
2456 	/*
2457 	 * If the combination of the address and size requested for this memory
2458 	 * region causes an integer overflow, return error.
2459 	 */
2460 	if (((addr + size) < addr) ||
2461 			PAGE_ALIGN(addr + size) < (addr + size)) {
2462 		dev_err(hdev->dev,
2463 			"user pointer 0x%llx + %llu causes integer overflow\n",
2464 			addr, size);
2465 		return -EINVAL;
2466 	}
2467 
2468 	userptr->pid = current->pid;
2469 	userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_KERNEL);
2470 	if (!userptr->sgt)
2471 		return -ENOMEM;
2472 
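	/*
	 * Split the user address into a page-aligned start and an in-page
	 * offset, and count how many pages the region spans. For example, with
	 * 4KB pages, addr = 0x1234 and size = 0x3000 give start = 0x1000,
	 * offset = 0x234, end = 0x5000 and npages = 4.
	 */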
2473 	start = addr & PAGE_MASK;
2474 	offset = addr & ~PAGE_MASK;
2475 	end = PAGE_ALIGN(addr + size);
2476 	npages = (end - start) >> PAGE_SHIFT;
2477 
2478 	userptr->size = size;
2479 	userptr->addr = addr;
2480 	userptr->dma_mapped = false;
2481 	INIT_LIST_HEAD(&userptr->job_node);
2482 
2483 	rc = get_user_memory(hdev, addr, size, npages, start, offset,
2484 				userptr);
2485 	if (rc) {
2486 		dev_err(hdev->dev,
2487 			"failed to get user memory for address 0x%llx\n",
2488 			addr);
2489 		goto free_sgt;
2490 	}
2491 
2492 	hl_debugfs_add_userptr(hdev, userptr);
2493 
2494 	return 0;
2495 
2496 free_sgt:
2497 	kfree(userptr->sgt);
2498 	return rc;
2499 }
2500 
2501 /*
2502  * hl_unpin_host_memory - unpins a chunk of host memory.
2503  * @hdev: pointer to the habanalabs device structure
2504  * @userptr: pointer to hl_userptr structure
2505  *
2506  * This function does the following:
2507  * - Unpins the physical pages related to the host memory.
2508  * - Frees the SG list.
2509  */
2510 void hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr)
2511 {
2512 	hl_debugfs_remove_userptr(hdev, userptr);
2513 
2514 	if (userptr->dma_mapped)
2515 		hdev->asic_funcs->hl_dma_unmap_sgtable(hdev, userptr->sgt, userptr->dir);
2516 
2517 	unpin_user_pages_dirty_lock(userptr->pages, userptr->npages, true);
2518 	kvfree(userptr->pages);
2519 
2520 	list_del(&userptr->job_node);
2521 
2522 	sg_free_table(userptr->sgt);
2523 	kfree(userptr->sgt);
2524 }
2525 
2526 /**
2527  * hl_userptr_delete_list() - clear userptr list.
2528  * @hdev: pointer to the habanalabs device structure.
2529  * @userptr_list: pointer to the list to clear.
2530  *
2531  * This function does the following:
2532  * - Iterates over the list and unpins the host memory and frees the userptr
2533  *   structure.
2534  */
2535 void hl_userptr_delete_list(struct hl_device *hdev,
2536 				struct list_head *userptr_list)
2537 {
2538 	struct hl_userptr *userptr, *tmp;
2539 
2540 	list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) {
2541 		hl_unpin_host_memory(hdev, userptr);
2542 		kfree(userptr);
2543 	}
2544 
2545 	INIT_LIST_HEAD(userptr_list);
2546 }
2547 
2548 /**
2549  * hl_userptr_is_pinned() - returns whether the given userptr is pinned.
2550  * @hdev: pointer to the habanalabs device structure.
2551  * @addr: user address to check.
2552  * @size: user block size to check.
2553  * @userptr_list: pointer to the list in which to search.
2554  * @userptr: pointer to userptr to check.
2555  *
2556  * This function does the following:
2557  * - Iterates over the list and checks if the given userptr is in it, which
2558  *   means it is pinned. If so, returns true; otherwise returns false.
2559  */
2560 bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr,
2561 				u32 size, struct list_head *userptr_list,
2562 				struct hl_userptr **userptr)
2563 {
2564 	list_for_each_entry((*userptr), userptr_list, job_node) {
2565 		if ((addr == (*userptr)->addr) && (size == (*userptr)->size))
2566 			return true;
2567 	}
2568 
2569 	return false;
2570 }
2571 
2572 /**
2573  * va_range_init() - initialize virtual addresses range.
2574  * @hdev: pointer to the habanalabs device structure.
2575  * @va_ranges: pointer to va_ranges array.
2576  * @range_type: virtual address range type.
2577  * @start: range start address, inclusive.
2578  * @end: range end address, inclusive.
2579  * @page_size: page size for this va_range.
2580  *
2581  * This function does the following:
2582  * - Initializes the virtual addresses list of the given range with the given
2583  *   addresses.
2584  */
2585 static int va_range_init(struct hl_device *hdev, struct hl_va_range **va_ranges,
2586 				enum hl_va_range_type range_type, u64 start,
2587 				u64 end, u32 page_size)
2588 {
2589 	struct hl_va_range *va_range = va_ranges[range_type];
2590 	int rc;
2591 
2592 	INIT_LIST_HEAD(&va_range->list);
2593 
2594 	/*
2595 	 * Page size alignment:
2596 	 * it is the caller's responsibility to align the addresses if the
2597 	 * page size is not a power of 2.
2598 	 */
2599 
2600 	if (is_power_of_2(page_size)) {
2601 		start = round_up(start, page_size);
2602 
2603 		/*
2604 		 * The end of the range is inclusive, hence we need to align it
2605 		 * to the end of the last full page in the range. For example if
2606 		 * end = 0x3ff5 with page size 0x1000, we need to align it to
2607 		 * 0x2fff. The remaining 0xff6 bytes do not form a full page.
2608 		 */
2609 		end = round_down(end + 1, page_size) - 1;
2610 	}
2611 
2612 	if (start >= end) {
2613 		dev_err(hdev->dev, "too small vm range for va list\n");
2614 		return -EFAULT;
2615 	}
2616 
2617 	rc = add_va_block(hdev, va_range, start, end);
2618 
2619 	if (rc) {
2620 		dev_err(hdev->dev, "Failed to init host va list\n");
2621 		return rc;
2622 	}
2623 
2624 	va_range->start_addr = start;
2625 	va_range->end_addr = end;
2626 	va_range->page_size = page_size;
2627 
2628 	return 0;
2629 }
2630 
2631 /**
2632  * va_range_fini() - clear a virtual addresses range.
2633  * @hdev: pointer to the habanalabs structure.
2634  * @va_range: pointer to virtual addresses range.
2635  *
2636  * This function does the following:
2637  * - Frees the virtual addresses block list and its lock.
2638  */
2639 static void va_range_fini(struct hl_device *hdev, struct hl_va_range *va_range)
2640 {
2641 	mutex_lock(&va_range->lock);
2642 	clear_va_list_locked(hdev, &va_range->list);
2643 	mutex_unlock(&va_range->lock);
2644 
2645 	mutex_destroy(&va_range->lock);
2646 	kfree(va_range);
2647 }
2648 
2649 /**
2650  * vm_ctx_init_with_ranges() - initialize virtual memory for context.
2651  * @ctx: pointer to the habanalabs context structure.
2652  * @host_range_start: host virtual addresses range start.
2653  * @host_range_end: host virtual addresses range end.
2654  * @host_page_size: host page size.
2655  * @host_huge_range_start: host virtual addresses range start for memory
2656  *                         allocated with huge pages.
2657  * @host_huge_range_end: host virtual addresses range end for memory allocated
2658  *                        with huge pages.
2659  * @host_huge_page_size: host huge page size.
2660  * @dram_range_start: dram virtual addresses range start.
2661  * @dram_range_end: dram virtual addresses range end.
2662  * @dram_page_size: dram page size.
2663  *
2664  * This function initializes the following:
2665  * - MMU for context.
2666  * - Virtual address to area descriptor hashtable.
2667  * - Virtual block list of available virtual memory.
2668  */
2669 static int vm_ctx_init_with_ranges(struct hl_ctx *ctx,
2670 					u64 host_range_start,
2671 					u64 host_range_end,
2672 					u32 host_page_size,
2673 					u64 host_huge_range_start,
2674 					u64 host_huge_range_end,
2675 					u32 host_huge_page_size,
2676 					u64 dram_range_start,
2677 					u64 dram_range_end,
2678 					u32 dram_page_size)
2679 {
2680 	struct hl_device *hdev = ctx->hdev;
2681 	int i, rc;
2682 
2683 	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++) {
2684 		ctx->va_range[i] =
2685 			kzalloc(sizeof(struct hl_va_range), GFP_KERNEL);
2686 		if (!ctx->va_range[i]) {
2687 			rc = -ENOMEM;
2688 			goto free_va_range;
2689 		}
2690 	}
2691 
2692 	rc = hl_mmu_ctx_init(ctx);
2693 	if (rc) {
2694 		dev_err(hdev->dev, "failed to init context %d\n", ctx->asid);
2695 		goto free_va_range;
2696 	}
2697 
2698 	mutex_init(&ctx->mem_hash_lock);
2699 	hash_init(ctx->mem_hash);
2700 
2701 	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2702 
2703 	rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_HOST,
2704 			host_range_start, host_range_end, host_page_size);
2705 	if (rc) {
2706 		dev_err(hdev->dev, "failed to init host vm range\n");
2707 		goto mmu_ctx_fini;
2708 	}
2709 
2710 	if (hdev->pmmu_huge_range) {
2711 		mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2712 
2713 		rc = va_range_init(hdev,
2714 			ctx->va_range, HL_VA_RANGE_TYPE_HOST_HUGE,
2715 			host_huge_range_start, host_huge_range_end,
2716 			host_huge_page_size);
2717 		if (rc) {
2718 			dev_err(hdev->dev,
2719 				"failed to init host huge vm range\n");
2720 			goto clear_host_va_range;
2721 		}
2722 	} else {
2723 		kfree(ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2724 		ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE] =
2725 				ctx->va_range[HL_VA_RANGE_TYPE_HOST];
2726 	}
2727 
2728 	mutex_init(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2729 
2730 	rc = va_range_init(hdev, ctx->va_range, HL_VA_RANGE_TYPE_DRAM,
2731 			dram_range_start, dram_range_end, dram_page_size);
2732 	if (rc) {
2733 		dev_err(hdev->dev, "failed to init dram vm range\n");
2734 		goto clear_host_huge_va_range;
2735 	}
2736 
2737 	hl_debugfs_add_ctx_mem_hash(hdev, ctx);
2738 
2739 	return 0;
2740 
2741 clear_host_huge_va_range:
2742 	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_DRAM]->lock);
2743 
2744 	if (hdev->pmmu_huge_range) {
2745 		mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2746 		clear_va_list_locked(hdev,
2747 			&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->list);
2748 		mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2749 	}
2750 clear_host_va_range:
2751 	if (hdev->pmmu_huge_range)
2752 		mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]->lock);
2753 	mutex_lock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2754 	clear_va_list_locked(hdev, &ctx->va_range[HL_VA_RANGE_TYPE_HOST]->list);
2755 	mutex_unlock(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2756 mmu_ctx_fini:
2757 	mutex_destroy(&ctx->va_range[HL_VA_RANGE_TYPE_HOST]->lock);
2758 	mutex_destroy(&ctx->mem_hash_lock);
2759 	hl_mmu_ctx_fini(ctx);
2760 free_va_range:
2761 	for (i = 0 ; i < HL_VA_RANGE_TYPE_MAX ; i++)
2762 		kfree(ctx->va_range[i]);
2763 
2764 	return rc;
2765 }
2766 
2767 int hl_vm_ctx_init(struct hl_ctx *ctx)
2768 {
2769 	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
2770 	u64 host_range_start, host_range_end, host_huge_range_start,
2771 		host_huge_range_end, dram_range_start, dram_range_end;
2772 	u32 host_page_size, host_huge_page_size, dram_page_size;
2773 
2774 	atomic64_set(&ctx->dram_phys_mem, 0);
2775 
2776 	/*
2777 	 * - If MMU is enabled, init the ranges as usual.
2778 	 * - If MMU is disabled, in case of host mapping, the returned address
2779 	 *   is the given one.
2780 	 *   In case of DRAM mapping, the returned address is the physical
2781 	 *   address of the memory related to the given handle.
2782 	 */
2783 	if (!ctx->hdev->mmu_enable)
2784 		return 0;
2785 
2786 	dram_range_start = prop->dmmu.start_addr;
2787 	dram_range_end = prop->dmmu.end_addr - 1;
2788 	dram_page_size = prop->dram_page_size ?
2789 				prop->dram_page_size : prop->dmmu.page_size;
2790 	host_range_start = prop->pmmu.start_addr;
2791 	host_range_end = prop->pmmu.end_addr - 1;
2792 	host_page_size = prop->pmmu.page_size;
2793 	host_huge_range_start = prop->pmmu_huge.start_addr;
2794 	host_huge_range_end = prop->pmmu_huge.end_addr - 1;
2795 	host_huge_page_size = prop->pmmu_huge.page_size;
2796 
2797 	return vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end,
2798 			host_page_size, host_huge_range_start,
2799 			host_huge_range_end, host_huge_page_size,
2800 			dram_range_start, dram_range_end, dram_page_size);
2801 }
2802 
2803 /**
2804  * hl_vm_ctx_fini() - virtual memory teardown of context.
2805  * @ctx: pointer to the habanalabs context structure.
2806  *
2807  * This function performs teardown of the following:
2808  * - Virtual block list of available virtual memory.
2809  * - Virtual address to area descriptor hashtable.
2810  * - MMU for context.
2811  *
2812  * In addition this function does the following:
2813  * - Unmaps the existing hashtable nodes if the hashtable is not empty. The
2814  *   hashtable should be empty as no valid mappings should exist at this
2815  *   point.
2816  * - Frees any existing physical page list from the idr which relates to the
2817  *   current context asid.
2818  * - This function checks the virtual block list for correctness. At this point
2819  *   the list should contain one element which describes the whole virtual
2820  *   memory range of the context. Otherwise, a warning is printed.
2821  */
2822 void hl_vm_ctx_fini(struct hl_ctx *ctx)
2823 {
2824 	struct hl_vm_phys_pg_pack *phys_pg_list, *tmp_phys_node;
2825 	struct hl_device *hdev = ctx->hdev;
2826 	struct hl_vm_hash_node *hnode;
2827 	struct hl_vm *vm = &hdev->vm;
2828 	struct hlist_node *tmp_node;
2829 	struct list_head free_list;
2830 	struct hl_mem_in args;
2831 	int i;
2832 
2833 	if (!hdev->mmu_enable)
2834 		return;
2835 
2836 	hl_debugfs_remove_ctx_mem_hash(hdev, ctx);
2837 
2838 	/*
2839 	 * Clearly something went wrong on hard reset so no point in printing
2840 	 * another side effect error
2841 	 */
2842 	if (!hdev->reset_info.hard_reset_pending && !hash_empty(ctx->mem_hash))
2843 		dev_dbg(hdev->dev,
2844 			"user released device without removing its memory mappings\n");
2845 
2846 	hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) {
2847 		dev_dbg(hdev->dev,
2848 			"hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n",
2849 			hnode->vaddr, ctx->asid);
2850 		args.unmap.device_virt_addr = hnode->vaddr;
2851 		unmap_device_va(ctx, &args, true);
2852 	}
2853 
2854 	mutex_lock(&hdev->mmu_lock);
2855 
2856 	/* invalidate the cache once after the unmapping loop */
2857 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
2858 	hl_mmu_invalidate_cache(hdev, true, MMU_OP_PHYS_PACK);
2859 
2860 	mutex_unlock(&hdev->mmu_lock);
2861 
2862 	INIT_LIST_HEAD(&free_list);
2863 
2864 	spin_lock(&vm->idr_lock);
2865 	idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i)
2866 		if (phys_pg_list->asid == ctx->asid) {
2867 			dev_dbg(hdev->dev,
2868 				"page list 0x%px of asid %d is still alive\n",
2869 				phys_pg_list, ctx->asid);
2870 
2871 			atomic64_sub(phys_pg_list->total_size, &hdev->dram_used_mem);
2872 			idr_remove(&vm->phys_pg_pack_handles, i);
2873 			list_add(&phys_pg_list->node, &free_list);
2874 		}
2875 	spin_unlock(&vm->idr_lock);
2876 
2877 	list_for_each_entry_safe(phys_pg_list, tmp_phys_node, &free_list, node)
2878 		free_phys_pg_pack(hdev, phys_pg_list);
2879 
2880 	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_DRAM]);
2881 	va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST]);
2882 
2883 	if (hdev->pmmu_huge_range)
2884 		va_range_fini(hdev, ctx->va_range[HL_VA_RANGE_TYPE_HOST_HUGE]);
2885 
2886 	mutex_destroy(&ctx->mem_hash_lock);
2887 	hl_mmu_ctx_fini(ctx);
2888 
2889 	/* In this case we need to clear the global accounting of DRAM usage
2890 	 * because the user notifies us on allocations. Once the user is gone,
2891 	 * all DRAM is available again.
2892 	 */
2893 	if (ctx->asid != HL_KERNEL_ASID_ID &&
2894 			!hdev->asic_prop.dram_supports_virtual_memory)
2895 		atomic64_set(&hdev->dram_used_mem, 0);
2896 }
2897 
2898 /**
2899  * hl_vm_init() - initialize virtual memory module.
2900  * @hdev: pointer to the habanalabs device structure.
2901  *
2902  * This function initializes the following:
2903  * - MMU module.
2904  * - DRAM physical pages pool.
2905  * - Idr for device memory allocation handles.
2906  */
2907 int hl_vm_init(struct hl_device *hdev)
2908 {
2909 	struct asic_fixed_properties *prop = &hdev->asic_prop;
2910 	struct hl_vm *vm = &hdev->vm;
2911 	int rc;
2912 
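	/*
	 * gen_pool_create() takes a power-of-2 minimum allocation order, so the
	 * pool granularity is the DRAM page size when that size is a power of
	 * 2, and the smaller DRAM_POOL_PAGE_SIZE otherwise.
	 */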
2913 	if (is_power_of_2(prop->dram_page_size))
2914 		vm->dram_pg_pool =
2915 			gen_pool_create(__ffs(prop->dram_page_size), -1);
2916 	else
2917 		vm->dram_pg_pool =
2918 			gen_pool_create(__ffs(DRAM_POOL_PAGE_SIZE), -1);
2919 
2920 	if (!vm->dram_pg_pool) {
2921 		dev_err(hdev->dev, "Failed to create dram page pool\n");
2922 		return -ENOMEM;
2923 	}
2924 
2925 	kref_init(&vm->dram_pg_pool_refcount);
2926 
2927 	rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address,
2928 			prop->dram_end_address - prop->dram_user_base_address,
2929 			-1);
2930 
2931 	if (rc) {
2932 		dev_err(hdev->dev,
2933 			"Failed to add memory to dram page pool %d\n", rc);
2934 		goto pool_add_err;
2935 	}
2936 
2937 	spin_lock_init(&vm->idr_lock);
2938 	idr_init(&vm->phys_pg_pack_handles);
2939 
2940 	atomic64_set(&hdev->dram_used_mem, 0);
2941 
2942 	vm->init_done = true;
2943 
2944 	return 0;
2945 
2946 pool_add_err:
2947 	gen_pool_destroy(vm->dram_pg_pool);
2948 
2949 	return rc;
2950 }
2951 
2952 /**
2953  * hl_vm_fini() - virtual memory module teardown.
2954  * @hdev: pointer to the habanalabs device structure.
2955  *
2956  * This function performs teardown of the following:
2957  * - Idr for device memory allocation handles.
2958  * - DRAM physical pages pool.
2959  * - MMU module.
2960  */
2961 void hl_vm_fini(struct hl_device *hdev)
2962 {
2963 	struct hl_vm *vm = &hdev->vm;
2964 
2965 	if (!vm->init_done)
2966 		return;
2967 
2968 	/*
2969 	 * At this point all the contexts should be freed and hence no DRAM
2970 	 * memory should be in use, so the DRAM pool should be freed here.
2971 	 */
2972 	if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1)
2973 		dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n",
2974 				__func__);
2975 
2976 	vm->init_done = false;
2977 }
2978 
2979 /**
2980  * hl_hw_block_mem_init() - HW block memory initialization.
2981  * @ctx: pointer to the habanalabs context structure.
2982  *
2983  * This function initializes the HW block virtual mapped addresses list and
2984  * its lock.
2985  */
2986 void hl_hw_block_mem_init(struct hl_ctx *ctx)
2987 {
2988 	mutex_init(&ctx->hw_block_list_lock);
2989 	INIT_LIST_HEAD(&ctx->hw_block_mem_list);
2990 }
2991 
2992 /**
2993  * hl_hw_block_mem_fini() - HW block memory teardown.
2994  * @ctx: pointer to the habanalabs context structure.
2995  *
2996  * This function clears the HW block virtual mapped addresses list and destroys
2997  * its lock.
2998  */
2999 void hl_hw_block_mem_fini(struct hl_ctx *ctx)
3000 {
3001 	struct hl_vm_hw_block_list_node *lnode, *tmp;
3002 
3003 	if (!list_empty(&ctx->hw_block_mem_list))
3004 		dev_crit(ctx->hdev->dev, "HW block mem list isn't empty\n");
3005 
3006 	list_for_each_entry_safe(lnode, tmp, &ctx->hw_block_mem_list, node) {
3007 		list_del(&lnode->node);
3008 		kfree(lnode);
3009 	}
3010 
3011 	mutex_destroy(&ctx->hw_block_list_lock);
3012 }
3013