// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>

#define MMU_ADDR_BUF_SIZE	40
#define MMU_ASID_BUF_SIZE	10
#define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
#define I2C_MAX_TRANSACTION_LEN	8

static struct dentry *hl_debug_root;

static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds the maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);
	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}

static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds the maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}

static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}

static int command_buffers_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cb *cb;
	bool first = true;

	spin_lock(&dev_entry->cb_spinlock);

	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CB ID   CTX ID   CB size    CB RefCnt    mmap?   CS counter\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %03llu        %d    0x%08x      %d          %d          %d\n",
			cb->buf->handle, cb->ctx->asid, cb->size,
			kref_read(&cb->buf->refcount),
			atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
	}

	spin_unlock(&dev_entry->cb_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int command_submission_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs *cs;
	bool first = true;

	spin_lock(&dev_entry->cs_spinlock);

	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CS ID   CS TYPE   CTX ASID   CS RefCnt   Submitted    Completed\n");
			seq_puts(s, "----------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %llu        %d          %d          %d           %d            %d\n",
			cs->sequence, cs->type, cs->ctx->asid,
			kref_read(&cs->refcount),
			cs->submitted, cs->completed);
	}

	spin_unlock(&dev_entry->cs_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int command_submission_jobs_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs_job *job;
	bool first = true;

	spin_lock(&dev_entry->cs_job_spinlock);

	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " JOB ID   CS ID    CS TYPE    CTX ASID   JOB RefCnt   H/W Queue\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		if (job->cs)
			seq_printf(s,
				"   %02d      %llu        %d        %d          %d           %d\n",
				job->id, job->cs->sequence, job->cs->type,
				job->cs->ctx->asid, kref_read(&job->refcount),
				job->hw_queue_id);
		else
			seq_printf(s,
				"   %02d      0        0        %d          %d           %d\n",
				job->id, HL_KERNEL_ASID_ID,
				kref_read(&job->refcount), job->hw_queue_id);
	}

	spin_unlock(&dev_entry->cs_job_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int userptr_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_userptr *userptr;
	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				"DMA_FROM_DEVICE", "DMA_NONE"};
	bool first = true;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " pid      user virtual address     size             dma dir\n");
			seq_puts(s, "----------------------------------------------------------\n");
		}
		seq_printf(s, " %-7d  0x%-14llx      %-10llu    %-30s\n",
				userptr->pid, userptr->addr, userptr->size,
				dma_dir[userptr->dir]);
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_va_range *va_range;
	struct hl_vm_va_block *va_block;
	enum vm_type *vm_type;
	bool once = true;
	u64 j;
	int i;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, "    virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		if (ctx->asid != HL_KERNEL_ASID_ID &&
		    !list_empty(&ctx->hw_block_mem_list)) {
			seq_puts(s, "\nhw_block mappings:\n\n");
			seq_puts(s,
				"    virtual address    block size    mapped size    HW block id\n");
			seq_puts(s,
				"---------------------------------------------------------------\n");
			mutex_lock(&ctx->hw_block_list_lock);
			list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
				seq_printf(s,
					"    0x%-14lx   %-6u        %-6u             %-9u\n",
					lnode->vaddr, lnode->block_size, lnode->mapped_size,
					lnode->id);
			}
			mutex_unlock(&ctx->hw_block_list_lock);
		}

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);

	}

	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);

	ctx = hl_get_compute_ctx(dev_entry->hdev);
	if (ctx) {
		seq_puts(s, "\nVA ranges:\n\n");
		for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
			va_range = ctx->va_range[i];
			seq_printf(s, "   va_range %d\n", i);
			seq_puts(s, "---------------------\n");
			mutex_lock(&va_range->lock);
			list_for_each_entry(va_block, &va_range->list, node) {
				seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
					   va_block->start, va_block->end,
					   va_block->size);
			}
			mutex_unlock(&va_range->lock);
			seq_puts(s, "\n");
		}
		hl_ctx_put(ctx);
	}

	if (!once)
		seq_puts(s, "\n");

	return 0;
}

static int userptr_lookup_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct scatterlist *sg;
	struct hl_userptr *userptr;
	bool first = true;
	u64 total_npages, npages, sg_start, sg_end;
	dma_addr_t dma_addr;
	int i;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (dev_entry->userptr_lookup >= userptr->addr &&
		    dev_entry->userptr_lookup < userptr->addr + userptr->size) {
			total_npages = 0;
			for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
				npages = hl_get_sg_info(sg, &dma_addr);
				sg_start = userptr->addr +
					total_npages * PAGE_SIZE;
				sg_end = userptr->addr +
					(total_npages + npages) * PAGE_SIZE;

				if (dev_entry->userptr_lookup >= sg_start &&
				    dev_entry->userptr_lookup < sg_end) {
					dma_addr += (dev_entry->userptr_lookup -
							sg_start);
					if (first) {
						first = false;
						seq_puts(s, "\n");
						seq_puts(s, " user virtual address         dma address       pid        region start     region size\n");
						seq_puts(s, "---------------------------------------------------------------------------------------\n");
					}
					seq_printf(s, " 0x%-18llx  0x%-16llx  %-8u  0x%-16llx %-12llu\n",
						dev_entry->userptr_lookup,
						(u64)dma_addr, userptr->pid,
						userptr->addr, userptr->size);
				}
				total_npages += npages;
			}
		}
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	ssize_t rc;
	u64 value;

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	dev_entry->userptr_lookup = value;

	return count;
}
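
/*
 * Example usage of the userptr_lookup node (a sketch; it assumes debugfs is
 * mounted at /sys/kernel/debug and a device directory named hl0 -- the real
 * name comes from dev_name(hdev->dev) -- and the address is a placeholder
 * user VA that must fall inside a pinned userptr region):
 *
 *   echo 0x7f1234560000 > /sys/kernel/debug/habanalabs/hl0/userptr_lookup
 *   cat /sys/kernel/debug/habanalabs/hl0/userptr_lookup
 */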

static int mmu_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct hl_ctx *ctx;
	struct hl_mmu_hop_info hops_info = {0};
	u64 virt_addr = dev_entry->mmu_addr, phys_addr;
	int i;

	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
		ctx = hdev->kernel_ctx;
	else
		ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return 0;
	}

	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
				virt_addr);
		goto put_ctx;
	}

	hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);

	if (hops_info.scrambled_vaddr &&
		(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr,
			hops_info.scrambled_vaddr,
			hops_info.unscrambled_paddr, phys_addr);
	else
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);

	for (i = 0 ; i < hops_info.used_hops ; i++) {
		seq_printf(s, "hop%d_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_addr);
		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_addr);
		seq_printf(s, "hop%d_pte: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_val);
	}

put_ctx:
	if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
		hl_ctx_put(ctx);

	return 0;
}

static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	char *c;
	ssize_t rc;

	if (count > sizeof(kbuf) - 1)
		goto err;
	if (copy_from_user(kbuf, buf, count))
		goto err;
	kbuf[count] = 0;

	c = strchr(kbuf, ' ');
	if (!c)
		goto err;
	*c = '\0';

	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
	if (rc)
		goto err;

	if (strncmp(c+1, "0x", 2))
		goto err;
	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
	if (rc)
		goto err;

	return count;

err:
	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

	return -EINVAL;
}
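
/*
 * Example, matching the usage string above (asid in decimal, address in hex
 * with a mandatory "0x" prefix). Paths assume debugfs at /sys/kernel/debug
 * and a device directory named hl0; asid and address are placeholders:
 *
 *   echo "0 0x20000000" > /sys/kernel/debug/habanalabs/hl0/mmu
 *   cat /sys/kernel/debug/habanalabs/hl0/mmu
 */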

static int mmu_ack_error(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	int rc;

	if (!dev_entry->mmu_cap_mask) {
		dev_err(hdev->dev, "mmu_cap_mask is not set\n");
		goto err;
	}

	rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return 0;
err:
	return -EINVAL;
}

static ssize_t mmu_ack_error_value_write(struct file *file,
		const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	ssize_t rc;

	if (count > sizeof(kbuf) - 1)
		goto err;

	if (copy_from_user(kbuf, buf, count))
		goto err;

	kbuf[count] = 0;

	if (strncmp(kbuf, "0x", 2))
		goto err;

	rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return count;
err:
	dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask> > mmu_error\n");

	return -EINVAL;
}
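
/*
 * Example, matching the usage string above (the mask must carry a "0x"
 * prefix). The mask value is ASIC-specific and shown only as a placeholder;
 * reading mmu_error afterwards acks the errors covered by the mask:
 *
 *   echo 0x1 > /sys/kernel/debug/habanalabs/hl0/mmu_error
 *   cat /sys/kernel/debug/habanalabs/hl0/mmu_error
 */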

static int engines_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct engines_data eng_data;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't check device idle during reset\n");
		return 0;
	}

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
	eng_data.buf = vmalloc(eng_data.allocated_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
				"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
				eng_data.actual_size, eng_data.allocated_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	seq_write(s, eng_data.buf, eng_data.actual_size);

	vfree(eng_data.buf);

	return 0;
}

static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 val = hdev->memory_scrub_val;
	int rc;

	if (!hl_device_operational(hdev, NULL)) {
		dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
		return -EIO;
	}

	mutex_lock(&hdev->fpriv_list_lock);
	if (hdev->is_compute_ctx_active) {
		mutex_unlock(&hdev->fpriv_list_lock);
		dev_err(hdev->dev, "can't scrub dram while a compute context exists\n");
		return -EBUSY;
	}
	hdev->is_in_dram_scrub = true;
	mutex_unlock(&hdev->fpriv_list_lock);

	rc = hdev->asic_funcs->scrub_device_dram(hdev, val);

	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_in_dram_scrub = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	if (rc)
		return rc;
	return count;
}
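
/*
 * Example of a DRAM scrub (a sketch; memory_scrub_val sets the fill pattern
 * and any write to memory_scrub triggers the scrub -- both nodes are created
 * in add_files_to_device() below; the pattern is a placeholder):
 *
 *   echo 0xdeadbeefdeadbeef > /sys/kernel/debug/habanalabs/hl0/memory_scrub_val
 *   echo 1 > /sys/kernel/debug/habanalabs/hl0/memory_scrub
 */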

static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (prop->dram_supports_virtual_memory &&
		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
		return true;

	if (addr >= prop->pmmu.start_addr &&
		addr < prop->pmmu.end_addr)
		return true;

	if (addr >= prop->pmmu_huge.start_addr &&
		addr < prop->pmmu_huge.end_addr)
		return true;

	return false;
}

static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
						u32 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_start_addr, dram_end_addr;

	if (prop->dram_supports_virtual_memory) {
		dram_start_addr = prop->dmmu.start_addr;
		dram_end_addr = prop->dmmu.end_addr;
	} else {
		dram_start_addr = prop->dram_base_address;
		dram_end_addr = prop->dram_end_address;
	}

	if (hl_mem_area_inside_range(addr, size, dram_start_addr,
					dram_end_addr))
		return true;

	if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
					prop->sram_end_address))
		return true;

	return false;
}

static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
			u64 *phys_addr)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_ctx *ctx;
	struct hl_vm_hash_node *hnode;
	u64 end_address, range_size;
	struct hl_userptr *userptr;
	enum vm_type *vm_type;
	bool valid = false;
	int i, rc = 0;

	ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	/* Verify address is mapped */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each(ctx->mem_hash, i, hnode, node) {
		vm_type = hnode->ptr;

		if (*vm_type == VM_TYPE_USERPTR) {
			userptr = hnode->ptr;
			range_size = userptr->size;
		} else {
			phys_pg_pack = hnode->ptr;
			range_size = phys_pg_pack->total_size;
		}

		end_address = virt_addr + size;
		if ((virt_addr >= hnode->vaddr) &&
				(end_address <= hnode->vaddr + range_size)) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&ctx->mem_hash_lock);

	if (!valid) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped\n",
			virt_addr);
		rc = -EINVAL;
		goto put_ctx;
	}

	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
	if (rc) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
		rc = -EINVAL;
	}

put_ctx:
	hl_ctx_put(ctx);

	return rc;
}

static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
		u64 *val, enum debugfs_access_type acc_type, bool *found)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	struct pci_mem_region *mem_reg;
	int i;

	for (i = 0; i < PCI_REGION_NUMBER; i++) {
		mem_reg = &hdev->pci_mem_region[i];
		if (!mem_reg->used)
			continue;
		if (addr >= mem_reg->region_base &&
			addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
			*found = true;
			return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
		}
	}
	return 0;
}

static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
		enum debugfs_access_type acc_type)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 offset = prop->device_dma_offset_for_host_access;

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = *(u32 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE32:
		*(u32 *) phys_to_virt(addr - offset) = *val;
		break;
	case DEBUGFS_READ64:
		*val = *(u64 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE64:
		*(u64 *) phys_to_virt(addr - offset) = *val;
		break;
	default:
		dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
		break;
	}
}

static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
				enum debugfs_access_type acc_type)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	u64 host_start = hdev->asic_prop.host_base_address;
	u64 host_end = hdev->asic_prop.host_end_address;
	bool user_address, found = false;
	int rc;

	user_address = hl_is_device_va(hdev, addr);
	if (user_address) {
		rc = device_va_to_pa(hdev, addr, acc_size, &addr);
		if (rc)
			return rc;
	}

	rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
	if (rc) {
		dev_err(hdev->dev,
			"Failed reading addr %#llx from dev mem (%d)\n",
			addr, rc);
		return rc;
	}

	if (found)
		return 0;

	if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
		rc = -EINVAL;
		goto err;
	}

	if (addr >= host_start && addr <= host_end - acc_size) {
		hl_access_host_mem(hdev, addr, val, acc_type);
	} else {
		rc = -EINVAL;
		goto err;
	}

	return 0;
err:
	dev_err(hdev->dev, "invalid addr %#llx\n", addr);
	return rc;
}

static ssize_t hl_data_read32(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u32 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
	if (rc)
		return rc;

	val = value64; /* downcast back to 32 bits */

	sprintf(tmp_buf, "0x%08x\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	u32 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	value64 = value;
	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
	if (rc)
		return rc;

	return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u64 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
	if (rc)
		return rc;

	sprintf(tmp_buf, "0x%016llx\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u64 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
	if (rc)
		return rc;

	return count;
}
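
/*
 * Example of a 32/64-bit access (a sketch; the target address is taken from
 * the "addr" node, which add_files_to_device() creates alongside data32 and
 * data64; address and write value below are placeholders):
 *
 *   echo 0x1000000 > /sys/kernel/debug/habanalabs/hl0/addr
 *   cat /sys/kernel/debug/habanalabs/hl0/data32
 *   echo 0xcafe > /sys/kernel/debug/habanalabs/hl0/data32
 *   cat /sys/kernel/debug/habanalabs/hl0/data64
 */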

static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	ssize_t rc;
	u32 size;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 16, &size);
	if (rc)
		return rc;

	if (!size) {
		dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
		return -EINVAL;
	}

	if (size > SZ_128M) {
		dev_err(hdev->dev,
			"DMA read failed. size can't be larger than 128MB\n");
		return -EINVAL;
	}

	if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
		dev_err(hdev->dev,
			"DMA read failed. Invalid 0x%010llx + 0x%08x\n",
			addr, size);
		return -EINVAL;
	}

	/* Free the previous allocation, if there was any */
	entry->data_dma_blob_desc.size = 0;
	vfree(entry->data_dma_blob_desc.data);

	entry->data_dma_blob_desc.data = vmalloc(size);
	if (!entry->data_dma_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
						entry->data_dma_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
		vfree(entry->data_dma_blob_desc.data);
		entry->data_dma_blob_desc.data = NULL;
		return -EIO;
	}

	entry->data_dma_blob_desc.size = size;

	return count;
}
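
/*
 * Example of a debugfs DMA read (a sketch; writing a hex size to dma_size
 * triggers the transfer from the device address held in "addr", and the
 * result is exposed through the "data_dma" blob; values are placeholders):
 *
 *   echo 0x10000000 > /sys/kernel/debug/habanalabs/hl0/addr
 *   echo 0x1000 > /sys/kernel/debug/habanalabs/hl0/dma_size
 *   cat /sys/kernel/debug/habanalabs/hl0/data_dma > dump.bin
 */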

static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 size, trig;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 10, &trig);
	if (rc)
		return rc;

	if (trig != 1) {
		dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
		return -EINVAL;
	}

	size = sizeof(struct cpucp_monitor_dump);

	/* Free the previous allocation, if there was any */
	entry->mon_dump_blob_desc.size = 0;
	vfree(entry->mon_dump_blob_desc.data);

	entry->mon_dump_blob_desc.data = vmalloc(size);
	if (!entry->mon_dump_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to dump monitors\n");
		vfree(entry->mon_dump_blob_desc.data);
		entry->mon_dump_blob_desc.data = NULL;
		return -EIO;
	}

	entry->mon_dump_blob_desc.size = size;

	return count;
}
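
/*
 * Example (only the value 1 is accepted, per the check above; the dump is
 * then readable from the "monitor_dump" blob node):
 *
 *   echo 1 > /sys/kernel/debug/habanalabs/hl0/monitor_dump_trig
 *   cat /sys/kernel/debug/habanalabs/hl0/monitor_dump > mon.bin
 */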

static ssize_t hl_get_power_state(struct file *f, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	int i;

	if (*ppos)
		return 0;

	if (hdev->pdev->current_state == PCI_D0)
		i = 1;
	else if (hdev->pdev->current_state == PCI_D3hot)
		i = 2;
	else
		i = 3;

	sprintf(tmp_buf,
		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value == 1) {
		pci_set_power_state(hdev->pdev, PCI_D0);
		pci_restore_state(hdev->pdev);
		rc = pci_enable_device(hdev->pdev);
		if (rc < 0)
			return rc;
	} else if (value == 2) {
		pci_save_state(hdev->pdev);
		pci_disable_device(hdev->pdev);
		pci_set_power_state(hdev->pdev, PCI_D3hot);
	} else {
		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
		return -EINVAL;
	}

	return count;
}
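
/*
 * Example (1 selects D0, 2 selects D3hot, per the mapping reported by
 * hl_get_power_state() above):
 *
 *   echo 2 > /sys/kernel/debug/habanalabs/hl0/set_power_state
 *   cat /sys/kernel/debug/habanalabs/hl0/set_power_state
 */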

static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 val;
	ssize_t rc;

	if (*ppos)
		return 0;

	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, &val);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	sprintf(tmp_buf, "%#02llx\n", val);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));

	return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value;
	ssize_t rc;

	rc = kstrtou64_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, value);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	return count;
}

static ssize_t hl_led0_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 0, value);

	return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 1, value);

	return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 2, value);

	return count;
}

static ssize_t hl_device_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	static const char *help =
		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";
	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

static ssize_t hl_device_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char data[30] = {0};

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	simple_write_to_buffer(data, 29, ppos, buf, count);

	if (strncmp("disable", data, strlen("disable")) == 0) {
		hdev->disabled = true;
	} else if (strncmp("enable", data, strlen("enable")) == 0) {
		hdev->disabled = false;
	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
		hdev->asic_funcs->suspend(hdev);
	} else if (strncmp("resume", data, strlen("resume")) == 0) {
		hdev->asic_funcs->resume(hdev);
	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
		hdev->device_cpu_disabled = true;
	} else {
		dev_err(hdev->dev,
			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
		count = -EINVAL;
	}

	return count;
}
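
/*
 * Example (any of the values listed in hl_device_read() is accepted):
 *
 *   echo disable > /sys/kernel/debug/habanalabs/hl0/device
 *   echo enable > /sys/kernel/debug/habanalabs/hl0/device
 */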

static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change stop on error during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	hdev->stop_on_err = value ? 1 : 0;

	hl_device_reset(hdev, 0);

	return count;
}

static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->ack_protection_bits_errors(hdev);

	return 0;
}

static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	ssize_t rc;

	down_read(&entry->state_dump_sem);
	if (!entry->state_dump[entry->state_dump_head])
		rc = 0;
	else
		rc = simple_read_from_buffer(
			buf, count, ppos,
			entry->state_dump[entry->state_dump_head],
			strlen(entry->state_dump[entry->state_dump_head]));
	up_read(&entry->state_dump_sem);

	return rc;
}

static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	ssize_t rc;
	u32 size;
	int i;

	rc = kstrtouint_from_user(buf, count, 10, &size);
	if (rc)
		return rc;

	if (!size || size >= ARRAY_SIZE(entry->state_dump)) {
		dev_err(hdev->dev, "Invalid number of dumps to skip\n");
		return -EINVAL;
	}

	if (entry->state_dump[entry->state_dump_head]) {
		down_write(&entry->state_dump_sem);
		for (i = 0; i < size; ++i) {
			vfree(entry->state_dump[entry->state_dump_head]);
			entry->state_dump[entry->state_dump_head] = NULL;
			if (entry->state_dump_head > 0)
				entry->state_dump_head--;
			else
				entry->state_dump_head =
					ARRAY_SIZE(entry->state_dump) - 1;
		}
		up_write(&entry->state_dump_sem);
	}

	return count;
}
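
/*
 * Example (reading returns the newest dump; writing N in decimal discards
 * the N newest dumps, with N bounded by the size of the state_dump array):
 *
 *   cat /sys/kernel/debug/habanalabs/hl0/state_dump
 *   echo 1 > /sys/kernel/debug/habanalabs/hl0/state_dump
 */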

static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n",
		jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value)
		hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
	else
		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	return count;
}
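
/*
 * Example (the value is in seconds; 0 disables the timeout by selecting
 * MAX_SCHEDULE_TIMEOUT):
 *
 *   echo 30 > /sys/kernel/debug/habanalabs/hl0/timeout_locked
 *   cat /sys/kernel/debug/habanalabs/hl0/timeout_locked
 */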

static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->check_if_razwi_happened(hdev);

	return 0;
}

static const struct file_operations hl_mem_scrub_fops = {
	.owner = THIS_MODULE,
	.write = hl_memory_scrub,
};

static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_dma_size_fops = {
	.owner = THIS_MODULE,
	.write = hl_dma_size_write
};

static const struct file_operations hl_monitor_dump_fops = {
	.owner = THIS_MODULE,
	.write = hl_monitor_dump_trigger
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};

static const struct file_operations hl_security_violations_fops = {
	.owner = THIS_MODULE,
	.read = hl_security_violations_read
};

static const struct file_operations hl_state_dump_fops = {
	.owner = THIS_MODULE,
	.read = hl_state_dump_read,
	.write = hl_state_dump_write
};

static const struct file_operations hl_timeout_locked_fops = {
	.owner = THIS_MODULE,
	.read = hl_timeout_locked_read,
	.write = hl_timeout_locked_write
};

static const struct file_operations hl_razwi_check_fops = {
	.owner = THIS_MODULE,
	.read = hl_check_razwi_happened
};

static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
	{"engines", engines_show, NULL},
};

static int hl_debugfs_open(struct inode *inode, struct file *file)
{
	struct hl_debugfs_entry *node = inode->i_private;

	return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct hl_debugfs_entry *node = file->f_inode->i_private;

	if (node->info_ent->write)
		return node->info_ent->write(file, buf, count, f_pos);
	else
		return -EINVAL;
}

static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
	debugfs_create_u8("i2c_bus",
				0644,
				root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr",
				0644,
				root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg",
				0644,
				root,
				&dev_entry->i2c_reg);

	debugfs_create_u8("i2c_len",
				0644,
				root,
				&dev_entry->i2c_len);

	debugfs_create_file("i2c_data",
				0644,
				root,
				dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0",
				0200,
				root,
				dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1",
				0200,
				root,
				dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2",
				0200,
				root,
				dev_entry,
				&hl_led2_fops);
}

static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
				struct dentry *root)
{
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	int i;

	debugfs_create_x64("memory_scrub_val",
				0644,
				root,
				&hdev->memory_scrub_val);

	debugfs_create_file("memory_scrub",
				0200,
				root,
				dev_entry,
				&hl_mem_scrub_fops);

	debugfs_create_x64("addr",
				0644,
				root,
				&dev_entry->addr);

	debugfs_create_file("data32",
				0644,
				root,
				dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64",
				0644,
				root,
				dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
				0644,
				root,
				dev_entry,
				&hl_power_fops);

	debugfs_create_file("device",
				0644,
				root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
				0644,
				root,
				dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err",
				0644,
				root,
				dev_entry,
				&hl_stop_on_err_fops);

	debugfs_create_file("dump_security_violations",
				0400,
				root,
				dev_entry,
				&hl_security_violations_fops);

	debugfs_create_file("dump_razwi_events",
				0400,
				root,
				dev_entry,
				&hl_razwi_check_fops);

	debugfs_create_file("dma_size",
				0200,
				root,
				dev_entry,
				&hl_dma_size_fops);

	debugfs_create_blob("data_dma",
				0400,
				root,
				&dev_entry->data_dma_blob_desc);

	debugfs_create_file("monitor_dump_trig",
				0200,
				root,
				dev_entry,
				&hl_monitor_dump_fops);

	debugfs_create_blob("monitor_dump",
				0400,
				root,
				&dev_entry->mon_dump_blob_desc);

	debugfs_create_x8("skip_reset_on_timeout",
				0644,
				root,
				&hdev->reset_info.skip_reset_on_timeout);

	debugfs_create_file("state_dump",
				0644,
				root,
				dev_entry,
				&hl_state_dump_fops);

	debugfs_create_file("timeout_locked",
				0644,
				root,
				dev_entry,
				&hl_timeout_locked_fops);

	debugfs_create_u32("device_release_watchdog_timeout",
				0644,
				root,
				&hdev->device_release_watchdog_timeout_sec);

	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		debugfs_create_file(hl_debugfs_list[i].name,
					0644,
					root,
					entry,
					&hl_debugfs_fops);
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}
}

int hl_debugfs_device_init(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count, sizeof(struct hl_debugfs_entry), GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return -ENOMEM;

	dev_entry->data_dma_blob_desc.size = 0;
	dev_entry->data_dma_blob_desc.data = NULL;
	dev_entry->mon_dump_blob_desc.size = 0;
	dev_entry->mon_dump_blob_desc.data = NULL;

	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	init_rwsem(&dev_entry->state_dump_sem);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	mutex_init(&dev_entry->ctx_mem_hash_mutex);

	return 0;
}

void hl_debugfs_device_fini(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
	int i;

	mutex_destroy(&entry->ctx_mem_hash_mutex);
	mutex_destroy(&entry->file_mutex);

	vfree(entry->data_dma_blob_desc.data);
	vfree(entry->mon_dump_blob_desc.data);

	for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
		vfree(entry->state_dump[i]);

	kfree(entry->entry_arr);
}

void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev), hl_debug_root);

	add_files_to_device(hdev, dev_entry, dev_entry->root);
	if (!hdev->asic_prop.fw_security_enabled)
		add_secured_nodes(dev_entry, dev_entry->root);
}

void hl_debugfs_remove_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;

	debugfs_remove_recursive(entry->root);
}

void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_del(&hpriv->debugfs_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_add(&cb->debugfs_list, &dev_entry->cb_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_del(&cb->debugfs_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_add(&cs->debugfs_list, &dev_entry->cs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_del(&cs->debugfs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_del(&job->debugfs_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_del(&userptr->debugfs_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
	list_del(&ctx->debugfs_list);
	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

/**
 * hl_debugfs_set_state_dump - register state dump making it accessible via
 *                             debugfs
 * @hdev: pointer to the device structure
 * @data: the actual dump data
 * @length: the length of the data
 */
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
					unsigned long length)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	down_write(&dev_entry->state_dump_sem);

	dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
					ARRAY_SIZE(dev_entry->state_dump);
	vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
	dev_entry->state_dump[dev_entry->state_dump_head] = data;

	up_write(&dev_entry->state_dump_sem);
}

void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}