1 // SPDX-License-Identifier: GPL-2.0
2 
3 /*
4  * Copyright 2016-2022 HabanaLabs, Ltd.
5  * All Rights Reserved.
6  */
7 
8 #define pr_fmt(fmt)	"habanalabs: " fmt
9 
10 #include <uapi/drm/habanalabs_accel.h>
11 #include "habanalabs.h"
12 
13 #include <linux/fs.h>
14 #include <linux/kernel.h>
15 #include <linux/pci.h>
16 #include <linux/slab.h>
17 #include <linux/uaccess.h>
18 #include <linux/vmalloc.h>
19 
/*
 * Size of the user-supplied parameter struct for each DEBUG ioctl opcode,
 * indexed by HL_DEBUG_OP_*. Opcodes that take no input parameters
 * (FUNNEL, TIMESTAMP) have size 0.
 */
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0

};
30 
31 static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
32 {
33 	struct hl_info_device_status dev_stat = {0};
34 	u32 size = args->return_size;
35 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
36 
37 	if ((!size) || (!out))
38 		return -EINVAL;
39 
40 	dev_stat.status = hl_device_status(hdev);
41 
42 	return copy_to_user(out, &dev_stat,
43 			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
44 }
45 
/*
 * hw_ip_info() - report the HW IP properties block to user space.
 *
 * Fills a struct hl_info_hw_ip_info with the user-visible SRAM/DRAM layout
 * (total sizes minus the regions reserved for the driver), engine enable
 * masks and assorted CPUCP/PLL properties, then copies at most
 * args->return_size bytes of it to args->return_pointer.
 *
 * Return: 0 on success, -EINVAL on bad args, -EFAULT if the user copy fails.
 */
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	/* Memory below the user base addresses is reserved for the driver */
	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	/* With a device MMU over DRAM, user space sees virtual DRAM addresses */
	hw_ip.dram_base_address =
			hdev->mmu_enable && prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	/* Legacy 8-bit TPC mask kept for older user space, plus the full mask */
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	/* When the MMU covers all of DRAM, expose only whole DRAM pages */
	if (hdev->mmu_enable == MMU_EN_ALL)
		hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
				prop->dram_page_size) * prop->dram_page_size;
	else
		hw_ip.dram_size = dram_available_size;

	/* Anything not bigger than a single host page is considered disabled */
	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	/* Bound the copies by both the source and destination field sizes */
	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;

	/* Truncate to the user's buffer size; older user space passes less */
	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}
114 
115 static int hw_events_info(struct hl_device *hdev, bool aggregate,
116 			struct hl_info_args *args)
117 {
118 	u32 size, max_size = args->return_size;
119 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
120 	void *arr;
121 
122 	if ((!max_size) || (!out))
123 		return -EINVAL;
124 
125 	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
126 	if (!arr) {
127 		dev_err(hdev->dev, "Events info not supported\n");
128 		return -EOPNOTSUPP;
129 	}
130 
131 	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
132 }
133 
134 static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
135 {
136 	u32 max_size = args->return_size;
137 	u64 events_mask;
138 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
139 
140 	if ((max_size < sizeof(u64)) || (!out))
141 		return -EINVAL;
142 
143 	mutex_lock(&hpriv->notifier_event.lock);
144 	events_mask = hpriv->notifier_event.events_mask;
145 	hpriv->notifier_event.events_mask = 0;
146 	mutex_unlock(&hpriv->notifier_event.lock);
147 
148 	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
149 }
150 
151 static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
152 {
153 	struct hl_device *hdev = hpriv->hdev;
154 	struct hl_info_dram_usage dram_usage = {0};
155 	u32 max_size = args->return_size;
156 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
157 	struct asic_fixed_properties *prop = &hdev->asic_prop;
158 	u64 dram_kmd_size;
159 
160 	if ((!max_size) || (!out))
161 		return -EINVAL;
162 
163 	dram_kmd_size = (prop->dram_user_base_address -
164 				prop->dram_base_address);
165 	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
166 					atomic64_read(&hdev->dram_used_mem);
167 	if (hpriv->ctx)
168 		dram_usage.ctx_dram_mem =
169 			atomic64_read(&hpriv->ctx->dram_phys_mem);
170 
171 	return copy_to_user(out, &dram_usage,
172 		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
173 }
174 
175 static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
176 {
177 	struct hl_info_hw_idle hw_idle = {0};
178 	u32 max_size = args->return_size;
179 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
180 
181 	if ((!max_size) || (!out))
182 		return -EINVAL;
183 
184 	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
185 					hw_idle.busy_engines_mask_ext,
186 					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
187 	hw_idle.busy_engines_mask =
188 			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);
189 
190 	return copy_to_user(out, &hw_idle,
191 		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
192 }
193 
194 static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
195 {
196 	struct hl_debug_params *params;
197 	void *input = NULL, *output = NULL;
198 	int rc;
199 
200 	params = kzalloc(sizeof(*params), GFP_KERNEL);
201 	if (!params)
202 		return -ENOMEM;
203 
204 	params->reg_idx = args->reg_idx;
205 	params->enable = args->enable;
206 	params->op = args->op;
207 
208 	if (args->input_ptr && args->input_size) {
209 		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
210 		if (!input) {
211 			rc = -ENOMEM;
212 			goto out;
213 		}
214 
215 		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
216 					args->input_size)) {
217 			rc = -EFAULT;
218 			dev_err(hdev->dev, "failed to copy input debug data\n");
219 			goto out;
220 		}
221 
222 		params->input = input;
223 	}
224 
225 	if (args->output_ptr && args->output_size) {
226 		output = kzalloc(args->output_size, GFP_KERNEL);
227 		if (!output) {
228 			rc = -ENOMEM;
229 			goto out;
230 		}
231 
232 		params->output = output;
233 		params->output_size = args->output_size;
234 	}
235 
236 	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
237 	if (rc) {
238 		dev_err(hdev->dev,
239 			"debug coresight operation failed %d\n", rc);
240 		goto out;
241 	}
242 
243 	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
244 					output, args->output_size)) {
245 		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
246 		rc = -EFAULT;
247 		goto out;
248 	}
249 
250 
251 out:
252 	kfree(params);
253 	kfree(output);
254 	kfree(input);
255 
256 	return rc;
257 }
258 
259 static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
260 {
261 	struct hl_info_device_utilization device_util = {0};
262 	u32 max_size = args->return_size;
263 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
264 	int rc;
265 
266 	if ((!max_size) || (!out))
267 		return -EINVAL;
268 
269 	rc = hl_device_utilization(hdev, &device_util.utilization);
270 	if (rc)
271 		return -EINVAL;
272 
273 	return copy_to_user(out, &device_util,
274 		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
275 }
276 
277 static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
278 {
279 	struct hl_info_clk_rate clk_rate = {0};
280 	u32 max_size = args->return_size;
281 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
282 	int rc;
283 
284 	if ((!max_size) || (!out))
285 		return -EINVAL;
286 
287 	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
288 	if (rc)
289 		return rc;
290 
291 	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
292 										? -EFAULT : 0;
293 }
294 
295 static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
296 {
297 	struct hl_info_reset_count reset_count = {0};
298 	u32 max_size = args->return_size;
299 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
300 
301 	if ((!max_size) || (!out))
302 		return -EINVAL;
303 
304 	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
305 	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;
306 
307 	return copy_to_user(out, &reset_count,
308 		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
309 }
310 
311 static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
312 {
313 	struct hl_info_time_sync time_sync = {0};
314 	u32 max_size = args->return_size;
315 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
316 
317 	if ((!max_size) || (!out))
318 		return -EINVAL;
319 
320 	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
321 	time_sync.host_time = ktime_get_raw_ns();
322 
323 	return copy_to_user(out, &time_sync,
324 		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
325 }
326 
327 static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
328 {
329 	struct hl_device *hdev = hpriv->hdev;
330 	struct hl_info_pci_counters pci_counters = {0};
331 	u32 max_size = args->return_size;
332 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
333 	int rc;
334 
335 	if ((!max_size) || (!out))
336 		return -EINVAL;
337 
338 	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
339 	if (rc)
340 		return rc;
341 
342 	return copy_to_user(out, &pci_counters,
343 		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
344 }
345 
/*
 * clk_throttle_info() - report clock throttling reasons and durations.
 *
 * For every throttling type that was ever active (aggregated_reason bit set),
 * returns the time it started and how long it lasted; a still-active event
 * (end timestamp not set) is measured up to "now".
 *
 * Return: 0 on success, -EINVAL on bad args, -EFAULT if the user copy fails.
 */
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	/* Lock protects current/aggregated reasons and the timestamp array */
	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		/* Skip throttle types that never fired */
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		/* end == 0 means the event is still in progress */
		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));

	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}
384 
/*
 * cs_counters_info() - report command-submission drop counters.
 *
 * Returns both the device-wide aggregated counters and, when the caller has
 * an active compute context, the per-context counters. All counters are read
 * individually with atomic64_read(), so the snapshot is per-field consistent
 * only (not a single atomic snapshot of the whole set).
 *
 * Return: 0 on success, -EINVAL on bad args, -EFAULT if the user copy fails.
 */
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	/* Device-wide counters, accumulated over all contexts */
	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	/* Per-context counters, only when a compute context is attached */
	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}
435 
436 static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
437 {
438 	struct hl_device *hdev = hpriv->hdev;
439 	struct asic_fixed_properties *prop = &hdev->asic_prop;
440 	struct hl_info_sync_manager sm_info = {0};
441 	u32 max_size = args->return_size;
442 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
443 
444 	if ((!max_size) || (!out))
445 		return -EINVAL;
446 
447 	if (args->dcore_id >= HL_MAX_DCORES)
448 		return -EINVAL;
449 
450 	sm_info.first_available_sync_object =
451 			prop->first_available_user_sob[args->dcore_id];
452 	sm_info.first_available_monitor =
453 			prop->first_available_user_mon[args->dcore_id];
454 	sm_info.first_available_cq =
455 			prop->first_available_cq[args->dcore_id];
456 
457 	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
458 			sizeof(sm_info))) ? -EFAULT : 0;
459 }
460 
461 static int total_energy_consumption_info(struct hl_fpriv *hpriv,
462 			struct hl_info_args *args)
463 {
464 	struct hl_device *hdev = hpriv->hdev;
465 	struct hl_info_energy total_energy = {0};
466 	u32 max_size = args->return_size;
467 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
468 	int rc;
469 
470 	if ((!max_size) || (!out))
471 		return -EINVAL;
472 
473 	rc = hl_fw_cpucp_total_energy_get(hdev,
474 			&total_energy.total_energy_consumption);
475 	if (rc)
476 		return rc;
477 
478 	return copy_to_user(out, &total_energy,
479 		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
480 }
481 
482 static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
483 {
484 	struct hl_device *hdev = hpriv->hdev;
485 	struct hl_pll_frequency_info freq_info = { {0} };
486 	u32 max_size = args->return_size;
487 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
488 	int rc;
489 
490 	if ((!max_size) || (!out))
491 		return -EINVAL;
492 
493 	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
494 	if (rc)
495 		return rc;
496 
497 	return copy_to_user(out, &freq_info,
498 		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
499 }
500 
501 static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
502 {
503 	struct hl_device *hdev = hpriv->hdev;
504 	u32 max_size = args->return_size;
505 	struct hl_power_info power_info = {0};
506 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
507 	int rc;
508 
509 	if ((!max_size) || (!out))
510 		return -EINVAL;
511 
512 	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
513 	if (rc)
514 		return rc;
515 
516 	return copy_to_user(out, &power_info,
517 		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
518 }
519 
520 static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
521 {
522 	struct hl_device *hdev = hpriv->hdev;
523 	u32 max_size = args->return_size;
524 	struct hl_open_stats_info open_stats_info = {0};
525 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
526 
527 	if ((!max_size) || (!out))
528 		return -EINVAL;
529 
530 	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
531 		hdev->last_open_session_duration_jif);
532 	open_stats_info.open_counter = hdev->open_counter;
533 	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
534 	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;
535 
536 	return copy_to_user(out, &open_stats_info,
537 		min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
538 }
539 
540 static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
541 {
542 	struct hl_device *hdev = hpriv->hdev;
543 	u32 max_size = args->return_size;
544 	u32 pend_rows_num = 0;
545 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
546 	int rc;
547 
548 	if ((!max_size) || (!out))
549 		return -EINVAL;
550 
551 	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
552 	if (rc)
553 		return rc;
554 
555 	return copy_to_user(out, &pend_rows_num,
556 			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
557 }
558 
559 static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
560 {
561 	struct hl_device *hdev = hpriv->hdev;
562 	u32 max_size = args->return_size;
563 	struct cpucp_hbm_row_info info = {0};
564 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
565 	int rc;
566 
567 	if ((!max_size) || (!out))
568 		return -EINVAL;
569 
570 	rc = hl_fw_dram_replaced_row_get(hdev, &info);
571 	if (rc)
572 		return rc;
573 
574 	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
575 }
576 
577 static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
578 {
579 	struct hl_info_last_err_open_dev_time info = {0};
580 	struct hl_device *hdev = hpriv->hdev;
581 	u32 max_size = args->return_size;
582 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
583 
584 	if ((!max_size) || (!out))
585 		return -EINVAL;
586 
587 	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);
588 
589 	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
590 }
591 
592 static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
593 {
594 	struct hl_info_cs_timeout_event info = {0};
595 	struct hl_device *hdev = hpriv->hdev;
596 	u32 max_size = args->return_size;
597 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
598 
599 	if ((!max_size) || (!out))
600 		return -EINVAL;
601 
602 	info.seq = hdev->captured_err_info.cs_timeout.seq;
603 	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);
604 
605 	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
606 }
607 
608 static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
609 {
610 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
611 	struct hl_device *hdev = hpriv->hdev;
612 	u32 max_size = args->return_size;
613 	struct razwi_info *razwi_info;
614 
615 	if ((!max_size) || (!out))
616 		return -EINVAL;
617 
618 	razwi_info = &hdev->captured_err_info.razwi_info;
619 	if (!razwi_info->razwi_info_available)
620 		return 0;
621 
622 	return copy_to_user(out, &razwi_info->razwi,
623 			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
624 }
625 
/*
 * undefined_opcode_info() - report the last captured undefined-opcode event.
 *
 * Copies the captured event details (engine, CQ address/size, stream and the
 * per-stream CB addresses) into the UAPI struct and then to user space.
 *
 * Return: 0 on success, -EINVAL on bad args, -EFAULT if the user copy fails.
 */
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	/* Bounded by the destination field size; both sides use the same layout */
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}
647 
648 static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
649 {
650 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
651 	struct hl_info_dev_memalloc_page_sizes info = {0};
652 	struct hl_device *hdev = hpriv->hdev;
653 	u32 max_size = args->return_size;
654 
655 	if ((!max_size) || (!out))
656 		return -EINVAL;
657 
658 	/*
659 	 * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2"
660 	 * pages (unlike some of the ASICs before supporting multiple page sizes).
661 	 * For this reason for all ASICs that not support multiple page size the function will
662 	 * return an empty bitmask indicating that multiple page sizes is not supported.
663 	 */
664 	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;
665 
666 	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
667 }
668 
669 static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
670 {
671 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
672 	struct cpucp_sec_attest_info *sec_attest_info;
673 	struct hl_info_sec_attest *info;
674 	u32 max_size = args->return_size;
675 	int rc;
676 
677 	if ((!max_size) || (!out))
678 		return -EINVAL;
679 
680 	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
681 	if (!sec_attest_info)
682 		return -ENOMEM;
683 
684 	info = kmalloc(sizeof(*info), GFP_KERNEL);
685 	if (!info) {
686 		rc = -ENOMEM;
687 		goto free_sec_attest_info;
688 	}
689 
690 	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
691 	if (rc)
692 		goto free_info;
693 
694 	info->nonce = le32_to_cpu(sec_attest_info->nonce);
695 	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
696 	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
697 	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
698 	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
699 	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
700 	info->quote_sig_len = sec_attest_info->quote_sig_len;
701 	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
702 	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
703 	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
704 	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
705 	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));
706 
707 	rc = copy_to_user(out, info,
708 				min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;
709 
710 free_info:
711 	kfree(info);
712 free_sec_attest_info:
713 	kfree(sec_attest_info);
714 
715 	return rc;
716 }
717 
718 static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
719 {
720 	int rc;
721 
722 	/* check if there is already a registered on that process */
723 	mutex_lock(&hpriv->notifier_event.lock);
724 	if (hpriv->notifier_event.eventfd) {
725 		mutex_unlock(&hpriv->notifier_event.lock);
726 		return -EINVAL;
727 	}
728 
729 	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
730 	if (IS_ERR(hpriv->notifier_event.eventfd)) {
731 		rc = PTR_ERR(hpriv->notifier_event.eventfd);
732 		hpriv->notifier_event.eventfd = NULL;
733 		mutex_unlock(&hpriv->notifier_event.lock);
734 		return rc;
735 	}
736 
737 	mutex_unlock(&hpriv->notifier_event.lock);
738 	return 0;
739 }
740 
741 static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
742 {
743 	mutex_lock(&hpriv->notifier_event.lock);
744 	if (!hpriv->notifier_event.eventfd) {
745 		mutex_unlock(&hpriv->notifier_event.lock);
746 		return -EINVAL;
747 	}
748 
749 	eventfd_ctx_put(hpriv->notifier_event.eventfd);
750 	hpriv->notifier_event.eventfd = NULL;
751 	mutex_unlock(&hpriv->notifier_event.lock);
752 	return 0;
753 }
754 
755 static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
756 {
757 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
758 	u32 status_buf_size = args->return_size;
759 	struct hl_device *hdev = hpriv->hdev;
760 	struct engines_data eng_data;
761 	int rc;
762 
763 	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
764 		return -EINVAL;
765 
766 	eng_data.actual_size = 0;
767 	eng_data.allocated_buf_size = status_buf_size;
768 	eng_data.buf = vmalloc(status_buf_size);
769 	if (!eng_data.buf)
770 		return -ENOMEM;
771 
772 	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);
773 
774 	if (eng_data.actual_size > eng_data.allocated_buf_size) {
775 		dev_err(hdev->dev,
776 			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
777 			eng_data.actual_size, status_buf_size);
778 		vfree(eng_data.buf);
779 		return -ENOMEM;
780 	}
781 
782 	args->user_buffer_actual_size = eng_data.actual_size;
783 	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
784 				-EFAULT : 0;
785 
786 	vfree(eng_data.buf);
787 
788 	return rc;
789 }
790 
791 static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
792 {
793 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
794 	struct hl_device *hdev = hpriv->hdev;
795 	u32 max_size = args->return_size;
796 	struct page_fault_info *pgf_info;
797 
798 	if ((!max_size) || (!out))
799 		return -EINVAL;
800 
801 	pgf_info = &hdev->captured_err_info.page_fault_info;
802 	if (!pgf_info->page_fault_info_available)
803 		return 0;
804 
805 	return copy_to_user(out, &pgf_info->page_fault,
806 			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
807 }
808 
809 static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
810 {
811 	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
812 	u32 user_buf_size = args->return_size;
813 	struct hl_device *hdev = hpriv->hdev;
814 	struct page_fault_info *pgf_info;
815 	u64 actual_size;
816 
817 	if (!out)
818 		return -EINVAL;
819 
820 	pgf_info = &hdev->captured_err_info.page_fault_info;
821 	if (!pgf_info->page_fault_info_available)
822 		return 0;
823 
824 	args->array_size = pgf_info->num_of_user_mappings;
825 
826 	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
827 	if (user_buf_size < actual_size)
828 		return -ENOMEM;
829 
830 	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
831 }
832 
833 static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
834 {
835 	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
836 	u32 size = info_args->return_size;
837 	dma_addr_t dma_handle;
838 	bool need_input_buff;
839 	void *fw_buff;
840 	int rc = 0;
841 
842 	switch (info_args->fw_sub_opcode) {
843 	case HL_PASSTHROUGH_VERSIONS:
844 		need_input_buff = false;
845 		break;
846 	default:
847 		return -EINVAL;
848 	}
849 
850 	if (size > SZ_1M) {
851 		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
852 		return -EINVAL;
853 	}
854 
855 	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
856 	if (!fw_buff)
857 		return -ENOMEM;
858 
859 
860 	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
861 		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
862 		rc = -EFAULT;
863 		goto free_buff;
864 	}
865 
866 	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
867 	if (rc)
868 		goto free_buff;
869 
870 	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
871 		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
872 		rc = -EFAULT;
873 	}
874 
875 free_buff:
876 	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);
877 
878 	return rc;
879 }
880 
/*
 * _hl_info_ioctl() - dispatch an INFO ioctl to the matching handler.
 *
 * Opcodes in the first switch are served even when the device is disabled or
 * in reset (status/error/statistics queries); all other opcodes require an
 * operational device. @dev selects which device node (compute or control)
 * is used for log messages.
 *
 * Return: 0 on success or a negative errno from the handler.
 */
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	/* Reserved padding must be zero for forward compatibility */
	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	default:
		/* fall through to the operational-only opcodes below */
		break;
	}

	/* Everything below requires an operational device */
	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);


	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}
1023 
1024 static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
1025 {
1026 	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
1027 }
1028 
1029 static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
1030 {
1031 	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
1032 }
1033 
1034 static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
1035 {
1036 	struct hl_debug_args *args = data;
1037 	struct hl_device *hdev = hpriv->hdev;
1038 	enum hl_device_status status;
1039 
1040 	int rc = 0;
1041 
1042 	if (!hl_device_operational(hdev, &status)) {
1043 		dev_dbg_ratelimited(hdev->dev,
1044 			"Device is %s. Can't execute DEBUG IOCTL\n",
1045 			hdev->status[status]);
1046 		return -EBUSY;
1047 	}
1048 
1049 	switch (args->op) {
1050 	case HL_DEBUG_OP_ETR:
1051 	case HL_DEBUG_OP_ETF:
1052 	case HL_DEBUG_OP_STM:
1053 	case HL_DEBUG_OP_FUNNEL:
1054 	case HL_DEBUG_OP_BMON:
1055 	case HL_DEBUG_OP_SPMU:
1056 	case HL_DEBUG_OP_TIMESTAMP:
1057 		if (!hdev->in_debug) {
1058 			dev_err_ratelimited(hdev->dev,
1059 				"Rejecting debug configuration request because device not in debug mode\n");
1060 			return -EFAULT;
1061 		}
1062 		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
1063 		rc = debug_coresight(hdev, hpriv->ctx, args);
1064 		break;
1065 
1066 	case HL_DEBUG_OP_SET_MODE:
1067 		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
1068 		break;
1069 
1070 	default:
1071 		dev_err(hdev->dev, "Invalid request %d\n", args->op);
1072 		rc = -EINVAL;
1073 		break;
1074 	}
1075 
1076 	return rc;
1077 }
1078 
/*
 * Build one hl_ioctl_desc table entry, placed at the index given by the
 * ioctl's command number (_IOC_NR) so dispatchers can index the table
 * directly with the nr extracted from an incoming cmd.
 */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
1081 
/* Dispatch table for the compute device node, indexed by _IOC_NR(cmd). */
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};
1090 
/* Dispatch table for the control device node; only INFO is supported there. */
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};
1094 
/*
 * _hl_ioctl() - common IOCTL plumbing shared by the compute and control nodes.
 * @filep: file backing the caller's FD; private_data holds the hl_fpriv.
 * @cmd: the raw command word passed by userspace.
 * @arg: userspace pointer to the ioctl argument buffer.
 * @ioctl: the pre-selected descriptor (handler + canonical cmd) to invoke.
 * @dev: device used for error/debug prints.
 *
 * Copies the argument struct in (for IOC_IN commands), invokes the handler,
 * and copies the result back out (for IOC_OUT commands). The kernel buffer
 * is sized to the larger of the kernel's and the user's declared struct
 * size, but user copies are bounded by the user's size (usize), so an
 * older/smaller userspace struct is tolerated; the remainder stays zeroed.
 *
 * Return: handler's return value, or negative errno on plumbing failure.
 */
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	/* Zero-initialized so IOC_OUT-only commands never leak stack bytes */
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	/* usize: size userspace declared; asize: actual buffer size to allocate,
	 * grown to the kernel's struct size if the user's is smaller.
	 */
	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	/* From here on, use the driver's canonical cmd, not the user's */
	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		/* Small arguments use the on-stack buffer; larger ones are
		 * heap-allocated. kzalloc keeps the tail zeroed when
		 * usize < asize.
		 */
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		/* Copy only what userspace declared (usize <= asize) */
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	/* Copy results back even if the handler failed, bounded by usize */
	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	/* Only free when the heap path was taken; kfree(NULL) is also a no-op */
	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}
1157 
1158 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
1159 {
1160 	struct hl_fpriv *hpriv = filep->private_data;
1161 	struct hl_device *hdev = hpriv->hdev;
1162 	const struct hl_ioctl_desc *ioctl = NULL;
1163 	unsigned int nr = _IOC_NR(cmd);
1164 
1165 	if (!hdev) {
1166 		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
1167 		return -ENODEV;
1168 	}
1169 
1170 	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
1171 		ioctl = &hl_ioctls[nr];
1172 	} else {
1173 		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
1174 			task_pid_nr(current), nr);
1175 		return -ENOTTY;
1176 	}
1177 
1178 	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
1179 }
1180 
1181 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
1182 {
1183 	struct hl_fpriv *hpriv = filep->private_data;
1184 	struct hl_device *hdev = hpriv->hdev;
1185 	const struct hl_ioctl_desc *ioctl = NULL;
1186 	unsigned int nr = _IOC_NR(cmd);
1187 
1188 	if (!hdev) {
1189 		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
1190 		return -ENODEV;
1191 	}
1192 
1193 	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
1194 		ioctl = &hl_ioctls_control[nr];
1195 	} else {
1196 		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
1197 			task_pid_nr(current), nr);
1198 		return -ENOTTY;
1199 	}
1200 
1201 	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
1202 }
1203