// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2021 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"
#include "../include/hw_ip/mmu/mmu_general.h"

#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/iommu.h>

#define MMU_ADDR_BUF_SIZE	40
#define MMU_ASID_BUF_SIZE	10
#define MMU_KBUF_SIZE		(MMU_ADDR_BUF_SIZE + MMU_ASID_BUF_SIZE)
#define I2C_MAX_TRANSACTION_LEN	8

static struct dentry *hl_debug_root;

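/*
 * Read up to I2C_MAX_TRANSACTION_LEN (8) bytes from an I2C device managed
 * by the device CPU, by sending a CPUCP_PACKET_I2C_RD packet to firmware.
 */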
static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 *val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_RD <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, val);
	if (rc)
		dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);

	return rc;
}

static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
				u8 i2c_reg, u8 i2c_len, u64 val)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return -EBUSY;

	if (i2c_len > I2C_MAX_TRANSACTION_LEN) {
		dev_err(hdev->dev, "I2C transaction length %u exceeds maximum of %u\n",
				i2c_len, I2C_MAX_TRANSACTION_LEN);
		return -EINVAL;
	}

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_I2C_WR <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.i2c_bus = i2c_bus;
	pkt.i2c_addr = i2c_addr;
	pkt.i2c_reg = i2c_reg;
	pkt.i2c_len = i2c_len;
	pkt.value = cpu_to_le64(val);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);

	return rc;
}

static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
{
	struct cpucp_packet pkt;
	int rc;

	if (!hl_device_operational(hdev, NULL))
		return;

	memset(&pkt, 0, sizeof(pkt));

	pkt.ctl = cpu_to_le32(CPUCP_PACKET_LED_SET <<
				CPUCP_PKT_CTL_OPCODE_SHIFT);
	pkt.led_index = cpu_to_le32(led);
	pkt.value = cpu_to_le64(state);

	rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
						0, NULL);

	if (rc)
		dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
}

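/* seq_file show callback: dump all live command buffers under cb_spinlock */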
static int command_buffers_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cb *cb;
	bool first = true;

	spin_lock(&dev_entry->cb_spinlock);

	list_for_each_entry(cb, &dev_entry->cb_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CB ID   CTX ID   CB size    CB RefCnt    mmap?   CS counter\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %03llu        %d    0x%08x      %d          %d          %d\n",
			cb->buf->handle, cb->ctx->asid, cb->size,
			kref_read(&cb->buf->refcount),
			atomic_read(&cb->buf->mmap), atomic_read(&cb->cs_cnt));
	}

	spin_unlock(&dev_entry->cb_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int command_submission_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs *cs;
	bool first = true;

	spin_lock(&dev_entry->cs_spinlock);

	list_for_each_entry(cs, &dev_entry->cs_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " CS ID   CS TYPE   CTX ASID   CS RefCnt   Submitted    Completed\n");
			seq_puts(s, "----------------------------------------------------------------\n");
		}
		seq_printf(s,
			"   %llu        %d          %d          %d           %d            %d\n",
			cs->sequence, cs->type, cs->ctx->asid,
			kref_read(&cs->refcount),
			cs->submitted, cs->completed);
	}

	spin_unlock(&dev_entry->cs_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int command_submission_jobs_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_cs_job *job;
	bool first = true;

	spin_lock(&dev_entry->cs_job_spinlock);

	list_for_each_entry(job, &dev_entry->cs_job_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " JOB ID   CS ID    CS TYPE    CTX ASID   JOB RefCnt   H/W Queue\n");
			seq_puts(s, "---------------------------------------------------------------\n");
		}
		if (job->cs)
			seq_printf(s,
				"   %02d      %llu        %d        %d          %d           %d\n",
				job->id, job->cs->sequence, job->cs->type,
				job->cs->ctx->asid, kref_read(&job->refcount),
				job->hw_queue_id);
		else
			seq_printf(s,
				"   %02d      0        0        %d          %d           %d\n",
				job->id, HL_KERNEL_ASID_ID,
				kref_read(&job->refcount), job->hw_queue_id);
	}

	spin_unlock(&dev_entry->cs_job_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static int userptr_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_userptr *userptr;
	char dma_dir[4][30] = {"DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
				"DMA_FROM_DEVICE", "DMA_NONE"};
	bool first = true;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (first) {
			first = false;
			seq_puts(s, "\n");
			seq_puts(s, " pid      user virtual address     size             dma dir\n");
			seq_puts(s, "----------------------------------------------------------\n");
		}
		seq_printf(s, " %-7d  0x%-14llx      %-10llu    %-30s\n",
				userptr->pid, userptr->addr, userptr->size,
				dma_dir[userptr->dir]);
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

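/* Dump each registered context's mappings, allocations and VA range blocks */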
static int vm_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_vm_hw_block_list_node *lnode;
	struct hl_ctx *ctx;
	struct hl_vm *vm;
	struct hl_vm_hash_node *hnode;
	struct hl_userptr *userptr;
	struct hl_vm_phys_pg_pack *phys_pg_pack = NULL;
	struct hl_va_range *va_range;
	struct hl_vm_va_block *va_block;
	enum vm_type *vm_type;
	bool once = true;
	u64 j;
	int i;

	if (!dev_entry->hdev->mmu_enable)
		return 0;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);

	list_for_each_entry(ctx, &dev_entry->ctx_mem_hash_list, debugfs_list) {
		once = false;
		seq_puts(s, "\n\n----------------------------------------------------");
		seq_puts(s, "\n----------------------------------------------------\n\n");
		seq_printf(s, "ctx asid: %u\n", ctx->asid);

		seq_puts(s, "\nmappings:\n\n");
		seq_puts(s, "    virtual address        size          handle\n");
		seq_puts(s, "----------------------------------------------------\n");
		mutex_lock(&ctx->mem_hash_lock);
		hash_for_each(ctx->mem_hash, i, hnode, node) {
			vm_type = hnode->ptr;

			if (*vm_type == VM_TYPE_USERPTR) {
				userptr = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu\n",
					hnode->vaddr, userptr->size);
			} else {
				phys_pg_pack = hnode->ptr;
				seq_printf(s,
					"    0x%-14llx      %-10llu       %-4u\n",
					hnode->vaddr, phys_pg_pack->total_size,
					phys_pg_pack->handle);
			}
		}
		mutex_unlock(&ctx->mem_hash_lock);

		if (ctx->asid != HL_KERNEL_ASID_ID &&
		    !list_empty(&ctx->hw_block_mem_list)) {
			seq_puts(s, "\nhw_block mappings:\n\n");
			seq_puts(s,
				"    virtual address    block size    mapped size    HW block id\n");
			seq_puts(s,
				"---------------------------------------------------------------\n");
			mutex_lock(&ctx->hw_block_list_lock);
			list_for_each_entry(lnode, &ctx->hw_block_mem_list, node) {
				seq_printf(s,
					"    0x%-14lx   %-6u        %-6u             %-9u\n",
					lnode->vaddr, lnode->block_size, lnode->mapped_size,
					lnode->id);
			}
			mutex_unlock(&ctx->hw_block_list_lock);
		}

		vm = &ctx->hdev->vm;
		spin_lock(&vm->idr_lock);

		if (!idr_is_empty(&vm->phys_pg_pack_handles))
			seq_puts(s, "\n\nallocations:\n");

		idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_pack, i) {
			if (phys_pg_pack->asid != ctx->asid)
				continue;

			seq_printf(s, "\nhandle: %u\n", phys_pg_pack->handle);
			seq_printf(s, "page size: %u\n\n",
						phys_pg_pack->page_size);
			seq_puts(s, "   physical address\n");
			seq_puts(s, "---------------------\n");
			for (j = 0 ; j < phys_pg_pack->npages ; j++) {
				seq_printf(s, "    0x%-14llx\n",
						phys_pg_pack->pages[j]);
			}
		}
		spin_unlock(&vm->idr_lock);
	}

	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);

	ctx = hl_get_compute_ctx(dev_entry->hdev);
	if (ctx) {
		seq_puts(s, "\nVA ranges:\n\n");
		for (i = HL_VA_RANGE_TYPE_HOST ; i < HL_VA_RANGE_TYPE_MAX ; ++i) {
			va_range = ctx->va_range[i];
			seq_printf(s, "   va_range %d\n", i);
			seq_puts(s, "---------------------\n");
			mutex_lock(&va_range->lock);
			list_for_each_entry(va_block, &va_range->list, node) {
				seq_printf(s, "%#16llx - %#16llx (%#llx)\n",
					   va_block->start, va_block->end,
					   va_block->size);
			}
			mutex_unlock(&va_range->lock);
			seq_puts(s, "\n");
		}
		hl_ctx_put(ctx);
	}

	if (!once)
		seq_puts(s, "\n");

	return 0;
}

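/*
 * Look up the host VA written to "userptr_lookup" inside the pinned userptr
 * regions and print the DMA address of the SG entry that backs it.
 */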
static int userptr_lookup_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct scatterlist *sg;
	struct hl_userptr *userptr;
	bool first = true;
	u64 total_npages, npages, sg_start, sg_end;
	dma_addr_t dma_addr;
	int i;

	spin_lock(&dev_entry->userptr_spinlock);

	list_for_each_entry(userptr, &dev_entry->userptr_list, debugfs_list) {
		if (dev_entry->userptr_lookup >= userptr->addr &&
		    dev_entry->userptr_lookup < userptr->addr + userptr->size) {
			total_npages = 0;
			for_each_sgtable_dma_sg(userptr->sgt, sg, i) {
				npages = hl_get_sg_info(sg, &dma_addr);
				sg_start = userptr->addr +
					total_npages * PAGE_SIZE;
				sg_end = userptr->addr +
					(total_npages + npages) * PAGE_SIZE;

				if (dev_entry->userptr_lookup >= sg_start &&
				    dev_entry->userptr_lookup < sg_end) {
					dma_addr += (dev_entry->userptr_lookup -
							sg_start);
					if (first) {
						first = false;
						seq_puts(s, "\n");
						seq_puts(s, " user virtual address         dma address       pid        region start     region size\n");
						seq_puts(s, "---------------------------------------------------------------------------------------\n");
					}
					seq_printf(s, " 0x%-18llx  0x%-16llx  %-8u  0x%-16llx %-12llu\n",
						dev_entry->userptr_lookup,
						(u64)dma_addr, userptr->pid,
						userptr->addr, userptr->size);
				}
				total_npages += npages;
			}
		}
	}

	spin_unlock(&dev_entry->userptr_spinlock);

	if (!first)
		seq_puts(s, "\n");

	return 0;
}

static ssize_t userptr_lookup_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	ssize_t rc;
	u64 value;

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	dev_entry->userptr_lookup = value;

	return count;
}

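/*
 * Translate the VA in "mmu_addr" for the ASID in "mmu_asid" (both set via
 * the "mmu" write handler) and print the physical address together with the
 * page-table hops used for the translation.
 */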
static int mmu_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct hl_ctx *ctx;
	struct hl_mmu_hop_info hops_info = {0};
	u64 virt_addr = dev_entry->mmu_addr, phys_addr;
	int i;

	if (!hdev->mmu_enable)
		return 0;

	if (dev_entry->mmu_asid == HL_KERNEL_ASID_ID)
		ctx = hdev->kernel_ctx;
	else
		ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return 0;
	}

	if (hl_mmu_get_tlb_info(ctx, virt_addr, &hops_info)) {
		dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n",
				virt_addr);
		goto put_ctx;
	}

	hl_mmu_va_to_pa(ctx, virt_addr, &phys_addr);

	if (hops_info.scrambled_vaddr &&
		(dev_entry->mmu_addr != hops_info.scrambled_vaddr))
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, scrambled virt_addr: 0x%llx,\nphys_addr: 0x%llx, scrambled_phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr,
			hops_info.scrambled_vaddr,
			hops_info.unscrambled_paddr, phys_addr);
	else
		seq_printf(s,
			"asid: %u, virt_addr: 0x%llx, phys_addr: 0x%llx\n",
			dev_entry->mmu_asid, dev_entry->mmu_addr, phys_addr);

	for (i = 0 ; i < hops_info.used_hops ; i++) {
		seq_printf(s, "hop%d_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_addr);
		seq_printf(s, "hop%d_pte_addr: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_addr);
		seq_printf(s, "hop%d_pte: 0x%llx\n",
				i, hops_info.hop_info[i].hop_pte_val);
	}

put_ctx:
	if (dev_entry->mmu_asid != HL_KERNEL_ASID_ID)
		hl_ctx_put(ctx);

	return 0;
}

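/* Parse "<asid> <0xaddr>" from user space into mmu_asid and mmu_addr */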
static ssize_t mmu_asid_va_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	char *c;
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;
	if (copy_from_user(kbuf, buf, count))
		goto err;
	kbuf[count] = 0;

	c = strchr(kbuf, ' ');
	if (!c)
		goto err;
	*c = '\0';

	rc = kstrtouint(kbuf, 10, &dev_entry->mmu_asid);
	if (rc)
		goto err;

	if (strncmp(c+1, "0x", 2))
		goto err;
	rc = kstrtoull(c+3, 16, &dev_entry->mmu_addr);
	if (rc)
		goto err;

	return count;

err:
	dev_err(hdev->dev, "usage: echo <asid> <0xaddr> > mmu\n");

	return -EINVAL;
}

static int mmu_ack_error(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	int rc;

	if (!hdev->mmu_enable)
		return 0;

	if (!dev_entry->mmu_cap_mask) {
		dev_err(hdev->dev, "mmu_cap_mask is not set\n");
		goto err;
	}

	rc = hdev->asic_funcs->ack_mmu_errors(hdev, dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return 0;
err:
	return -EINVAL;
}

static ssize_t mmu_ack_error_value_write(struct file *file,
		const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct seq_file *s = file->private_data;
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	char kbuf[MMU_KBUF_SIZE];
	ssize_t rc;

	if (!hdev->mmu_enable)
		return count;

	if (count > sizeof(kbuf) - 1)
		goto err;

	if (copy_from_user(kbuf, buf, count))
		goto err;

	kbuf[count] = 0;

	if (strncmp(kbuf, "0x", 2))
		goto err;

	rc = kstrtoull(kbuf, 16, &dev_entry->mmu_cap_mask);
	if (rc)
		goto err;

	return count;
err:
	dev_err(hdev->dev, "usage: echo <0xmmu_cap_mask> > mmu_error\n");

	return -EINVAL;
}

static int engines_show(struct seq_file *s, void *data)
{
	struct hl_debugfs_entry *entry = s->private;
	struct hl_dbg_device_entry *dev_entry = entry->dev_entry;
	struct hl_device *hdev = dev_entry->hdev;
	struct engines_data eng_data;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't check device idle during reset\n");
		return 0;
	}

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = HL_ENGINES_DATA_MAX_SIZE;
	eng_data.buf = vmalloc(eng_data.allocated_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
				"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
				eng_data.actual_size, eng_data.allocated_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	seq_write(s, eng_data.buf, eng_data.actual_size);

	vfree(eng_data.buf);

	return 0;
}

static ssize_t hl_memory_scrub(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 val = hdev->memory_scrub_val;
	int rc;

	if (!hl_device_operational(hdev, NULL)) {
		dev_warn_ratelimited(hdev->dev, "Can't scrub memory, device is not operational\n");
		return -EIO;
	}

	mutex_lock(&hdev->fpriv_list_lock);
	if (hdev->is_compute_ctx_active) {
		mutex_unlock(&hdev->fpriv_list_lock);
		dev_err(hdev->dev, "can't scrub dram, context exists\n");
		return -EBUSY;
	}
	hdev->is_in_dram_scrub = true;
	mutex_unlock(&hdev->fpriv_list_lock);

	rc = hdev->asic_funcs->scrub_device_dram(hdev, val);

	mutex_lock(&hdev->fpriv_list_lock);
	hdev->is_in_dram_scrub = false;
	mutex_unlock(&hdev->fpriv_list_lock);

	if (rc)
		return rc;
	return count;
}

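/* Check whether addr falls inside one of the device MMU VA ranges */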
static bool hl_is_device_va(struct hl_device *hdev, u64 addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (!hdev->mmu_enable)
		goto out;

	if (prop->dram_supports_virtual_memory &&
		(addr >= prop->dmmu.start_addr && addr < prop->dmmu.end_addr))
		return true;

	if (addr >= prop->pmmu.start_addr &&
		addr < prop->pmmu.end_addr)
		return true;

	if (addr >= prop->pmmu_huge.start_addr &&
		addr < prop->pmmu_huge.end_addr)
		return true;
out:
	return false;
}

static bool hl_is_device_internal_memory_va(struct hl_device *hdev, u64 addr,
						u32 size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_start_addr, dram_end_addr;

	if (!hdev->mmu_enable)
		return false;

	if (prop->dram_supports_virtual_memory) {
		dram_start_addr = prop->dmmu.start_addr;
		dram_end_addr = prop->dmmu.end_addr;
	} else {
		dram_start_addr = prop->dram_base_address;
		dram_end_addr = prop->dram_end_address;
	}

	if (hl_mem_area_inside_range(addr, size, dram_start_addr,
					dram_end_addr))
		return true;

	if (hl_mem_area_inside_range(addr, size, prop->sram_base_address,
					prop->sram_end_address))
		return true;

	return false;
}

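/*
 * Translate a device VA to a physical address. The range [virt_addr,
 * virt_addr + size) must be fully covered by an existing mapping of the
 * compute context.
 */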
static int device_va_to_pa(struct hl_device *hdev, u64 virt_addr, u32 size,
			u64 *phys_addr)
{
	struct hl_vm_phys_pg_pack *phys_pg_pack;
	struct hl_ctx *ctx;
	struct hl_vm_hash_node *hnode;
	u64 end_address, range_size;
	struct hl_userptr *userptr;
	enum vm_type *vm_type;
	bool valid = false;
	int i, rc = 0;

	ctx = hl_get_compute_ctx(hdev);

	if (!ctx) {
		dev_err(hdev->dev, "no ctx available\n");
		return -EINVAL;
	}

	/* Verify address is mapped */
	mutex_lock(&ctx->mem_hash_lock);
	hash_for_each(ctx->mem_hash, i, hnode, node) {
		vm_type = hnode->ptr;

		if (*vm_type == VM_TYPE_USERPTR) {
			userptr = hnode->ptr;
			range_size = userptr->size;
		} else {
			phys_pg_pack = hnode->ptr;
			range_size = phys_pg_pack->total_size;
		}

		end_address = virt_addr + size;
		if ((virt_addr >= hnode->vaddr) &&
				(end_address <= hnode->vaddr + range_size)) {
			valid = true;
			break;
		}
	}
	mutex_unlock(&ctx->mem_hash_lock);

	if (!valid) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped\n",
			virt_addr);
		rc = -EINVAL;
		goto put_ctx;
	}

	rc = hl_mmu_va_to_pa(ctx, virt_addr, phys_addr);
	if (rc) {
		dev_err(hdev->dev,
			"virt addr 0x%llx is not mapped to phys addr\n",
			virt_addr);
		rc = -EINVAL;
	}

put_ctx:
	hl_ctx_put(ctx);

	return rc;
}

static int hl_access_dev_mem_by_region(struct hl_device *hdev, u64 addr,
		u64 *val, enum debugfs_access_type acc_type, bool *found)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	struct pci_mem_region *mem_reg;
	int i;

	for (i = 0; i < PCI_REGION_NUMBER; i++) {
		mem_reg = &hdev->pci_mem_region[i];
		if (!mem_reg->used)
			continue;
		if (addr >= mem_reg->region_base &&
			addr <= mem_reg->region_base + mem_reg->region_size - acc_size) {
			*found = true;
			return hdev->asic_funcs->access_dev_mem(hdev, i, addr, val, acc_type);
		}
	}
	return 0;
}

static void hl_access_host_mem(struct hl_device *hdev, u64 addr, u64 *val,
		enum debugfs_access_type acc_type)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 offset = prop->device_dma_offset_for_host_access;

	switch (acc_type) {
	case DEBUGFS_READ32:
		*val = *(u32 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE32:
		*(u32 *) phys_to_virt(addr - offset) = *val;
		break;
	case DEBUGFS_READ64:
		*val = *(u64 *) phys_to_virt(addr - offset);
		break;
	case DEBUGFS_WRITE64:
		*(u64 *) phys_to_virt(addr - offset) = *val;
		break;
	default:
		dev_err(hdev->dev, "hostmem access-type %d is not supported\n", acc_type);
		break;
	}
}

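/*
 * Access memory at addr: a device VA is first translated to a physical
 * address, which is then matched against the PCI memory regions; host
 * physical addresses are accessed directly, but only for translated user
 * addresses and only when no IOMMU is mapped for the device.
 */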
static int hl_access_mem(struct hl_device *hdev, u64 addr, u64 *val,
				enum debugfs_access_type acc_type)
{
	size_t acc_size = (acc_type == DEBUGFS_READ64 || acc_type == DEBUGFS_WRITE64) ?
		sizeof(u64) : sizeof(u32);
	u64 host_start = hdev->asic_prop.host_base_address;
	u64 host_end = hdev->asic_prop.host_end_address;
	bool user_address, found = false;
	int rc;

	user_address = hl_is_device_va(hdev, addr);
	if (user_address) {
		rc = device_va_to_pa(hdev, addr, acc_size, &addr);
		if (rc)
			return rc;
	}

	rc = hl_access_dev_mem_by_region(hdev, addr, val, acc_type, &found);
	if (rc) {
		dev_err(hdev->dev,
			"Failed accessing addr %#llx in dev mem (%d)\n",
			addr, rc);
		return rc;
	}

	if (found)
		return 0;

	if (!user_address || device_iommu_mapped(&hdev->pdev->dev)) {
		rc = -EINVAL;
		goto err;
	}

	if (addr >= host_start && addr <= host_end - acc_size) {
		hl_access_host_mem(hdev, addr, val, acc_type);
	} else {
		rc = -EINVAL;
		goto err;
	}

	return 0;
err:
	dev_err(hdev->dev, "invalid addr %#llx\n", addr);
	return rc;
}

static ssize_t hl_data_read32(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u32 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_READ32);
	if (rc)
		return rc;

	val = value64; /* downcast back to 32 */

	sprintf(tmp_buf, "0x%08x\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write32(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value64, addr = entry->addr;
	u32 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	value64 = value;
	rc = hl_access_mem(hdev, addr, &value64, DEBUGFS_WRITE32);
	if (rc)
		return rc;

	return count;
}

static ssize_t hl_data_read64(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	char tmp_buf[32];
	ssize_t rc;
	u64 val;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't read during reset\n");
		return 0;
	}

	if (*ppos)
		return 0;

	rc = hl_access_mem(hdev, addr, &val, DEBUGFS_READ64);
	if (rc)
		return rc;

	sprintf(tmp_buf, "0x%016llx\n", val);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_data_write64(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	u64 value;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't write during reset\n");
		return 0;
	}

	rc = kstrtoull_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_access_mem(hdev, addr, &value, DEBUGFS_WRITE64);
	if (rc)
		return rc;

	return count;
}

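/*
 * Writing a size here triggers a DMA read of that many bytes (up to 128MB,
 * device-internal memory only) from the address in "addr" into the
 * "data_dma" blob.
 */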
static ssize_t hl_dma_size_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 addr = entry->addr;
	ssize_t rc;
	u32 size;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't DMA during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 16, &size);
	if (rc)
		return rc;

	if (!size) {
		dev_err(hdev->dev, "DMA read failed. size can't be 0\n");
		return -EINVAL;
	}

	if (size > SZ_128M) {
		dev_err(hdev->dev,
			"DMA read failed. size can't be larger than 128MB\n");
		return -EINVAL;
	}

	if (!hl_is_device_internal_memory_va(hdev, addr, size)) {
		dev_err(hdev->dev,
			"DMA read failed. Invalid 0x%010llx + 0x%08x\n",
			addr, size);
		return -EINVAL;
	}

	/* Free the previous allocation, if there was any */
	entry->data_dma_blob_desc.size = 0;
	vfree(entry->data_dma_blob_desc.data);

	entry->data_dma_blob_desc.data = vmalloc(size);
	if (!entry->data_dma_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->debugfs_read_dma(hdev, addr, size,
						entry->data_dma_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to DMA from 0x%010llx\n", addr);
		vfree(entry->data_dma_blob_desc.data);
		entry->data_dma_blob_desc.data = NULL;
		return -EIO;
	}

	entry->data_dma_blob_desc.size = size;

	return count;
}

static ssize_t hl_monitor_dump_trigger(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 size, trig;
	ssize_t rc;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev, "Can't dump monitors during reset\n");
		return 0;
	}
	rc = kstrtouint_from_user(buf, count, 10, &trig);
	if (rc)
		return rc;

	if (trig != 1) {
		dev_err(hdev->dev, "Must write 1 to trigger monitor dump\n");
		return -EINVAL;
	}

	size = sizeof(struct cpucp_monitor_dump);

	/* Free the previous allocation, if there was any */
	entry->mon_dump_blob_desc.size = 0;
	vfree(entry->mon_dump_blob_desc.data);

	entry->mon_dump_blob_desc.data = vmalloc(size);
	if (!entry->mon_dump_blob_desc.data)
		return -ENOMEM;

	rc = hdev->asic_funcs->get_monitor_dump(hdev, entry->mon_dump_blob_desc.data);
	if (rc) {
		dev_err(hdev->dev, "Failed to dump monitors\n");
		vfree(entry->mon_dump_blob_desc.data);
		entry->mon_dump_blob_desc.data = NULL;
		return -EIO;
	}

	entry->mon_dump_blob_desc.size = size;

	return count;
}

static ssize_t hl_get_power_state(struct file *f, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	int i;

	if (*ppos)
		return 0;

	if (hdev->pdev->current_state == PCI_D0)
		i = 1;
	else if (hdev->pdev->current_state == PCI_D3hot)
		i = 2;
	else
		i = 3;

	sprintf(tmp_buf,
		"current power state: %d\n1 - D0\n2 - D3hot\n3 - Unknown\n", i);
	return simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));
}

static ssize_t hl_set_power_state(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value == 1) {
		pci_set_power_state(hdev->pdev, PCI_D0);
		pci_restore_state(hdev->pdev);
		rc = pci_enable_device(hdev->pdev);
		if (rc < 0)
			return rc;
	} else if (value == 2) {
		pci_save_state(hdev->pdev);
		pci_disable_device(hdev->pdev);
		pci_set_power_state(hdev->pdev, PCI_D3hot);
	} else {
		dev_dbg(hdev->dev, "invalid power state value %u\n", value);
		return -EINVAL;
	}

	return count;
}

static ssize_t hl_i2c_data_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[32];
	u64 val;
	ssize_t rc;

	if (*ppos)
		return 0;

	rc = hl_debugfs_i2c_read(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, &val);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to read from I2C bus %d, addr %d, reg %d, len %d\n",
			entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	sprintf(tmp_buf, "%#02llx\n", val);
	rc = simple_read_from_buffer(buf, count, ppos, tmp_buf,
			strlen(tmp_buf));

	return rc;
}

static ssize_t hl_i2c_data_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u64 value;
	ssize_t rc;

	rc = kstrtou64_from_user(buf, count, 16, &value);
	if (rc)
		return rc;

	rc = hl_debugfs_i2c_write(hdev, entry->i2c_bus, entry->i2c_addr,
			entry->i2c_reg, entry->i2c_len, value);
	if (rc) {
		dev_err(hdev->dev,
			"Failed to write %#02llx to I2C bus %d, addr %d, reg %d, len %d\n",
			value, entry->i2c_bus, entry->i2c_addr, entry->i2c_reg, entry->i2c_len);
		return rc;
	}

	return count;
}

static ssize_t hl_led0_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 0, value);

	return count;
}

static ssize_t hl_led1_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 1, value);

	return count;
}

static ssize_t hl_led2_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	value = value ? 1 : 0;

	hl_debugfs_led_set(hdev, 2, value);

	return count;
}

static ssize_t hl_device_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	static const char *help =
		"Valid values: disable, enable, suspend, resume, cpu_timeout\n";

	return simple_read_from_buffer(buf, count, ppos, help, strlen(help));
}

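/* Control device state: disable, enable, suspend, resume or cpu_timeout */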
static ssize_t hl_device_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char data[30] = {0};

	/* don't allow partial writes */
	if (*ppos != 0)
		return 0;

	simple_write_to_buffer(data, 29, ppos, buf, count);

	if (strncmp("disable", data, strlen("disable")) == 0) {
		hdev->disabled = true;
	} else if (strncmp("enable", data, strlen("enable")) == 0) {
		hdev->disabled = false;
	} else if (strncmp("suspend", data, strlen("suspend")) == 0) {
		hdev->asic_funcs->suspend(hdev);
	} else if (strncmp("resume", data, strlen("resume")) == 0) {
		hdev->asic_funcs->resume(hdev);
	} else if (strncmp("cpu_timeout", data, strlen("cpu_timeout")) == 0) {
		hdev->device_cpu_disabled = true;
	} else {
		dev_err(hdev->dev,
			"Valid values: disable, enable, suspend, resume, cpu_timeout\n");
		count = -EINVAL;
	}

	return count;
}

static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t hl_stop_on_err_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n", hdev->stop_on_err);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_stop_on_err_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	if (!hdev->asic_prop.configurable_stop_on_err)
		return -EOPNOTSUPP;

	if (hdev->reset_info.in_reset) {
		dev_warn_ratelimited(hdev->dev,
				"Can't change stop on error during reset\n");
		return 0;
	}

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	hdev->stop_on_err = value ? 1 : 0;

	hl_device_reset(hdev, 0);

	return count;
}

static ssize_t hl_security_violations_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->ack_protection_bits_errors(hdev);

	return 0;
}

static ssize_t hl_state_dump_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	ssize_t rc;

	down_read(&entry->state_dump_sem);
	if (!entry->state_dump[entry->state_dump_head])
		rc = 0;
	else
		rc = simple_read_from_buffer(
			buf, count, ppos,
			entry->state_dump[entry->state_dump_head],
			strlen(entry->state_dump[entry->state_dump_head]));
	up_read(&entry->state_dump_sem);

	return rc;
}

static ssize_t hl_state_dump_write(struct file *f, const char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	ssize_t rc;
	u32 size;
	int i;

	rc = kstrtouint_from_user(buf, count, 10, &size);
	if (rc)
		return rc;

	if (!size || size >= ARRAY_SIZE(entry->state_dump)) {
		dev_err(hdev->dev, "Invalid number of dumps to skip\n");
		return -EINVAL;
	}

	if (entry->state_dump[entry->state_dump_head]) {
		down_write(&entry->state_dump_sem);
		for (i = 0; i < size; ++i) {
			vfree(entry->state_dump[entry->state_dump_head]);
			entry->state_dump[entry->state_dump_head] = NULL;
			if (entry->state_dump_head > 0)
				entry->state_dump_head--;
			else
				entry->state_dump_head =
					ARRAY_SIZE(entry->state_dump) - 1;
		}
		up_write(&entry->state_dump_sem);
	}

	return count;
}

static ssize_t hl_timeout_locked_read(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	char tmp_buf[200];
	ssize_t rc;

	if (*ppos)
		return 0;

	sprintf(tmp_buf, "%d\n",
		jiffies_to_msecs(hdev->timeout_jiffies) / 1000);
	rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
			strlen(tmp_buf) + 1);

	return rc;
}

static ssize_t hl_timeout_locked_write(struct file *f, const char __user *buf,
				     size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;
	u32 value;
	ssize_t rc;

	rc = kstrtouint_from_user(buf, count, 10, &value);
	if (rc)
		return rc;

	if (value)
		hdev->timeout_jiffies = msecs_to_jiffies(value * 1000);
	else
		hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	return count;
}

static ssize_t hl_check_razwi_happened(struct file *f, char __user *buf,
					size_t count, loff_t *ppos)
{
	struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
	struct hl_device *hdev = entry->hdev;

	hdev->asic_funcs->check_if_razwi_happened(hdev);

	return 0;
}

static const struct file_operations hl_mem_scrub_fops = {
	.owner = THIS_MODULE,
	.write = hl_memory_scrub,
};

static const struct file_operations hl_data32b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read32,
	.write = hl_data_write32
};

static const struct file_operations hl_data64b_fops = {
	.owner = THIS_MODULE,
	.read = hl_data_read64,
	.write = hl_data_write64
};

static const struct file_operations hl_dma_size_fops = {
	.owner = THIS_MODULE,
	.write = hl_dma_size_write
};

static const struct file_operations hl_monitor_dump_fops = {
	.owner = THIS_MODULE,
	.write = hl_monitor_dump_trigger
};

static const struct file_operations hl_i2c_data_fops = {
	.owner = THIS_MODULE,
	.read = hl_i2c_data_read,
	.write = hl_i2c_data_write
};

static const struct file_operations hl_power_fops = {
	.owner = THIS_MODULE,
	.read = hl_get_power_state,
	.write = hl_set_power_state
};

static const struct file_operations hl_led0_fops = {
	.owner = THIS_MODULE,
	.write = hl_led0_write
};

static const struct file_operations hl_led1_fops = {
	.owner = THIS_MODULE,
	.write = hl_led1_write
};

static const struct file_operations hl_led2_fops = {
	.owner = THIS_MODULE,
	.write = hl_led2_write
};

static const struct file_operations hl_device_fops = {
	.owner = THIS_MODULE,
	.read = hl_device_read,
	.write = hl_device_write
};

static const struct file_operations hl_clk_gate_fops = {
	.owner = THIS_MODULE,
	.read = hl_clk_gate_read,
	.write = hl_clk_gate_write
};

static const struct file_operations hl_stop_on_err_fops = {
	.owner = THIS_MODULE,
	.read = hl_stop_on_err_read,
	.write = hl_stop_on_err_write
};

static const struct file_operations hl_security_violations_fops = {
	.owner = THIS_MODULE,
	.read = hl_security_violations_read
};

static const struct file_operations hl_state_dump_fops = {
	.owner = THIS_MODULE,
	.read = hl_state_dump_read,
	.write = hl_state_dump_write
};

static const struct file_operations hl_timeout_locked_fops = {
	.owner = THIS_MODULE,
	.read = hl_timeout_locked_read,
	.write = hl_timeout_locked_write
};

static const struct file_operations hl_razwi_check_fops = {
	.owner = THIS_MODULE,
	.read = hl_check_razwi_happened
};

static const struct hl_info_list hl_debugfs_list[] = {
	{"command_buffers", command_buffers_show, NULL},
	{"command_submission", command_submission_show, NULL},
	{"command_submission_jobs", command_submission_jobs_show, NULL},
	{"userptr", userptr_show, NULL},
	{"vm", vm_show, NULL},
	{"userptr_lookup", userptr_lookup_show, userptr_lookup_write},
	{"mmu", mmu_show, mmu_asid_va_write},
	{"mmu_error", mmu_ack_error, mmu_ack_error_value_write},
	{"engines", engines_show, NULL},
};

static int hl_debugfs_open(struct inode *inode, struct file *file)
{
	struct hl_debugfs_entry *node = inode->i_private;

	return single_open(file, node->info_ent->show, node);
}

static ssize_t hl_debugfs_write(struct file *file, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	struct hl_debugfs_entry *node = file->f_inode->i_private;

	if (node->info_ent->write)
		return node->info_ent->write(file, buf, count, f_pos);
	else
		return -EINVAL;
}

static const struct file_operations hl_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = hl_debugfs_open,
	.read = seq_read,
	.write = hl_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

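/*
 * These nodes (raw I2C access and LED control) are exposed only when
 * firmware security is disabled.
 */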
static void add_secured_nodes(struct hl_dbg_device_entry *dev_entry, struct dentry *root)
{
	debugfs_create_u8("i2c_bus",
				0644,
				root,
				&dev_entry->i2c_bus);

	debugfs_create_u8("i2c_addr",
				0644,
				root,
				&dev_entry->i2c_addr);

	debugfs_create_u8("i2c_reg",
				0644,
				root,
				&dev_entry->i2c_reg);

	debugfs_create_u8("i2c_len",
				0644,
				root,
				&dev_entry->i2c_len);

	debugfs_create_file("i2c_data",
				0644,
				root,
				dev_entry,
				&hl_i2c_data_fops);

	debugfs_create_file("led0",
				0200,
				root,
				dev_entry,
				&hl_led0_fops);

	debugfs_create_file("led1",
				0200,
				root,
				dev_entry,
				&hl_led1_fops);

	debugfs_create_file("led2",
				0200,
				root,
				dev_entry,
				&hl_led2_fops);
}

static void add_files_to_device(struct hl_device *hdev, struct hl_dbg_device_entry *dev_entry,
				struct dentry *root)
{
	int count = ARRAY_SIZE(hl_debugfs_list);
	struct hl_debugfs_entry *entry;
	int i;

	debugfs_create_x64("memory_scrub_val",
				0644,
				root,
				&hdev->memory_scrub_val);

	debugfs_create_file("memory_scrub",
				0200,
				root,
				dev_entry,
				&hl_mem_scrub_fops);

	debugfs_create_x64("addr",
				0644,
				root,
				&dev_entry->addr);

	debugfs_create_file("data32",
				0644,
				root,
				dev_entry,
				&hl_data32b_fops);

	debugfs_create_file("data64",
				0644,
				root,
				dev_entry,
				&hl_data64b_fops);

	debugfs_create_file("set_power_state",
				0200,
				root,
				dev_entry,
				&hl_power_fops);

	debugfs_create_file("device",
				0200,
				root,
				dev_entry,
				&hl_device_fops);

	debugfs_create_file("clk_gate",
				0200,
				root,
				dev_entry,
				&hl_clk_gate_fops);

	debugfs_create_file("stop_on_err",
				0644,
				root,
				dev_entry,
				&hl_stop_on_err_fops);

	debugfs_create_file("dump_security_violations",
				0644,
				root,
				dev_entry,
				&hl_security_violations_fops);

	debugfs_create_file("dump_razwi_events",
				0644,
				root,
				dev_entry,
				&hl_razwi_check_fops);

	debugfs_create_file("dma_size",
				0200,
				root,
				dev_entry,
				&hl_dma_size_fops);

	debugfs_create_blob("data_dma",
				0400,
				root,
				&dev_entry->data_dma_blob_desc);

	debugfs_create_file("monitor_dump_trig",
				0200,
				root,
				dev_entry,
				&hl_monitor_dump_fops);

	debugfs_create_blob("monitor_dump",
				0400,
				root,
				&dev_entry->mon_dump_blob_desc);

	debugfs_create_x8("skip_reset_on_timeout",
				0644,
				root,
				&hdev->reset_info.skip_reset_on_timeout);

	debugfs_create_file("state_dump",
				0600,
				root,
				dev_entry,
				&hl_state_dump_fops);

	debugfs_create_file("timeout_locked",
				0644,
				root,
				dev_entry,
				&hl_timeout_locked_fops);

	debugfs_create_u32("device_release_watchdog_timeout",
				0644,
				root,
				&hdev->device_release_watchdog_timeout_sec);

	for (i = 0, entry = dev_entry->entry_arr ; i < count ; i++, entry++) {
		debugfs_create_file(hl_debugfs_list[i].name,
					0444,
					root,
					entry,
					&hl_debugfs_fops);
		entry->info_ent = &hl_debugfs_list[i];
		entry->dev_entry = dev_entry;
	}
}

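/**
 * hl_debugfs_add_device - create the debugfs directory and nodes for a device
 * @hdev: pointer to the device structure
 */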
void hl_debugfs_add_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;
	int count = ARRAY_SIZE(hl_debugfs_list);

	dev_entry->hdev = hdev;
	dev_entry->entry_arr = kmalloc_array(count,
					sizeof(struct hl_debugfs_entry),
					GFP_KERNEL);
	if (!dev_entry->entry_arr)
		return;

	dev_entry->data_dma_blob_desc.size = 0;
	dev_entry->data_dma_blob_desc.data = NULL;
	dev_entry->mon_dump_blob_desc.size = 0;
	dev_entry->mon_dump_blob_desc.data = NULL;

	INIT_LIST_HEAD(&dev_entry->file_list);
	INIT_LIST_HEAD(&dev_entry->cb_list);
	INIT_LIST_HEAD(&dev_entry->cs_list);
	INIT_LIST_HEAD(&dev_entry->cs_job_list);
	INIT_LIST_HEAD(&dev_entry->userptr_list);
	INIT_LIST_HEAD(&dev_entry->ctx_mem_hash_list);
	mutex_init(&dev_entry->file_mutex);
	init_rwsem(&dev_entry->state_dump_sem);
	spin_lock_init(&dev_entry->cb_spinlock);
	spin_lock_init(&dev_entry->cs_spinlock);
	spin_lock_init(&dev_entry->cs_job_spinlock);
	spin_lock_init(&dev_entry->userptr_spinlock);
	mutex_init(&dev_entry->ctx_mem_hash_mutex);

	dev_entry->root = debugfs_create_dir(dev_name(hdev->dev),
						hl_debug_root);

	add_files_to_device(hdev, dev_entry, dev_entry->root);
	if (!hdev->asic_prop.fw_security_enabled)
		add_secured_nodes(dev_entry, dev_entry->root);
}

void hl_debugfs_remove_device(struct hl_device *hdev)
{
	struct hl_dbg_device_entry *entry = &hdev->hl_debugfs;
	int i;

	debugfs_remove_recursive(entry->root);

	mutex_destroy(&entry->ctx_mem_hash_mutex);
	mutex_destroy(&entry->file_mutex);

	vfree(entry->data_dma_blob_desc.data);
	vfree(entry->mon_dump_blob_desc.data);

	for (i = 0; i < ARRAY_SIZE(entry->state_dump); ++i)
		vfree(entry->state_dump[i]);

	kfree(entry->entry_arr);
}

void hl_debugfs_add_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_add(&hpriv->debugfs_list, &dev_entry->file_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_remove_file(struct hl_fpriv *hpriv)
{
	struct hl_dbg_device_entry *dev_entry = &hpriv->hdev->hl_debugfs;

	mutex_lock(&dev_entry->file_mutex);
	list_del(&hpriv->debugfs_list);
	mutex_unlock(&dev_entry->file_mutex);
}

void hl_debugfs_add_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_add(&cb->debugfs_list, &dev_entry->cb_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_remove_cb(struct hl_cb *cb)
{
	struct hl_dbg_device_entry *dev_entry = &cb->hdev->hl_debugfs;

	spin_lock(&dev_entry->cb_spinlock);
	list_del(&cb->debugfs_list);
	spin_unlock(&dev_entry->cb_spinlock);
}

void hl_debugfs_add_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_add(&cs->debugfs_list, &dev_entry->cs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_remove_cs(struct hl_cs *cs)
{
	struct hl_dbg_device_entry *dev_entry = &cs->ctx->hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_spinlock);
	list_del(&cs->debugfs_list);
	spin_unlock(&dev_entry->cs_spinlock);
}

void hl_debugfs_add_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_add(&job->debugfs_list, &dev_entry->cs_job_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_remove_job(struct hl_device *hdev, struct hl_cs_job *job)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->cs_job_spinlock);
	list_del(&job->debugfs_list);
	spin_unlock(&dev_entry->cs_job_spinlock);
}

void hl_debugfs_add_userptr(struct hl_device *hdev, struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_add(&userptr->debugfs_list, &dev_entry->userptr_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_remove_userptr(struct hl_device *hdev,
				struct hl_userptr *userptr)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	spin_lock(&dev_entry->userptr_spinlock);
	list_del(&userptr->debugfs_list);
	spin_unlock(&dev_entry->userptr_spinlock);
}

void hl_debugfs_add_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
	list_add(&ctx->debugfs_list, &dev_entry->ctx_mem_hash_list);
	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

void hl_debugfs_remove_ctx_mem_hash(struct hl_device *hdev, struct hl_ctx *ctx)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	mutex_lock(&dev_entry->ctx_mem_hash_mutex);
	list_del(&ctx->debugfs_list);
	mutex_unlock(&dev_entry->ctx_mem_hash_mutex);
}

/**
 * hl_debugfs_set_state_dump - register state dump making it accessible via
 *                             debugfs
 * @hdev: pointer to the device structure
 * @data: the actual dump data
 * @length: the length of the data
 */
void hl_debugfs_set_state_dump(struct hl_device *hdev, char *data,
					unsigned long length)
{
	struct hl_dbg_device_entry *dev_entry = &hdev->hl_debugfs;

	down_write(&dev_entry->state_dump_sem);

	dev_entry->state_dump_head = (dev_entry->state_dump_head + 1) %
					ARRAY_SIZE(dev_entry->state_dump);
	vfree(dev_entry->state_dump[dev_entry->state_dump_head]);
	dev_entry->state_dump[dev_entry->state_dump_head] = data;

	up_write(&dev_entry->state_dump_sem);
}

void __init hl_debugfs_init(void)
{
	hl_debug_root = debugfs_create_dir("habanalabs", NULL);
}

void hl_debugfs_fini(void)
{
	debugfs_remove_recursive(hl_debug_root);
}