// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

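/*
 * Expected size of the user-supplied input structure for each debug op.
 * Ops with a zero entry (FUNNEL, TIMESTAMP) take no input payload;
 * hl_debug_ioctl() clamps args->input_size to these values before the
 * ASIC-specific handler runs.
 */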
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};

static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

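/*
 * hw_ip_info() - fill and return the HW IP information structure to the user.
 *
 * The SRAM/DRAM sizes reported to the user exclude the regions reserved for
 * the driver (KMD), and when the full MMU is enabled the DRAM size is rounded
 * down to a whole multiple of the DRAM page size.
 */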
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address =
			hdev->mmu_enable && prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	if (hdev->mmu_enable == MMU_EN_ALL)
		hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
				prop->dram_page_size) * prop->dram_page_size;
	else
		hw_ip.dram_size = dram_available_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
	hw_ip.reserved_dram_size = dram_kmd_size;

	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

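/*
 * events_info() - return the pending events bitmask to the user.
 *
 * Reading the mask is destructive: it is copied and then cleared under the
 * notifier lock, so each event bit is reported only once.
 */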
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

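/*
 * debug_coresight() - run a coresight debug operation on behalf of the user.
 *
 * Input and output payloads are bounced through kernel buffers: the input
 * (if any) is copied in before the ASIC callback runs, and the output buffer
 * (if any) is copied back to the user on success.
 */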
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return copy_to_user(out, &device_util,
		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
										? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
			&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
				&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info,
			min_t(size_t, max_size, sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
		min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}

static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that support multiple DRAM page sizes will support only
	 * "power of 2" pages (unlike some of the earlier ASICs that support
	 * multiple page sizes).
	 * For this reason, for all ASICs that do not support multiple page sizes,
	 * the function returns an empty bitmask, indicating that multiple page
	 * sizes are not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

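/*
 * sec_attest_info() - return secured attestation data to the user.
 *
 * The firmware reply (little-endian) is fetched into a kernel buffer and
 * repacked field by field into the host-endian uAPI structure before being
 * copied to the user.
 */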
static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_sec_attest_info *sec_attest_info;
	struct hl_info_sec_attest *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
	if (!sec_attest_info)
		return -ENOMEM;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_sec_attest_info;
	}

	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
	if (rc)
		goto free_info;

	info->nonce = le32_to_cpu(sec_attest_info->nonce);
	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
	info->quote_sig_len = sec_attest_info->quote_sig_len;
	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));

	rc = copy_to_user(out, info,
				min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;

free_info:
	kfree(info);
free_sec_attest_info:
	kfree(sec_attest_info);

	return rc;
}

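/*
 * eventfd_register() / eventfd_unregister() - manage the notifier eventfd.
 *
 * At most one eventfd may be registered per open file; registration fails
 * with -EINVAL if one is already installed.
 */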
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if an eventfd is already registered for this process */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

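/*
 * engine_status_info() - return a textual engines status dump to the user.
 *
 * The status text is built by the ASIC's is_device_idle() callback into a
 * vmalloc'ed buffer of the user-requested size (bounded between 1KB and
 * HL_ENGINES_DATA_MAX_SIZE) and then copied back to the user.
 */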
static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
				-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}

static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ? -EFAULT : 0;
}

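/*
 * send_fw_generic_request() - pass a generic request through to the firmware.
 *
 * The user buffer is bounced through a CPU-accessible DMA buffer (capped at
 * 1MB): it is copied in only for sub-opcodes that take input, handed to the
 * FW by DMA address, and the FW reply is copied back to the user.
 */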
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}

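/*
 * _hl_info_ioctl() - dispatcher for the INFO ioctl.
 *
 * The first switch handles opcodes that are served even when the device is
 * disabled or in reset; all remaining opcodes require an operational device
 * and are handled by the second switch.
 *
 * Illustrative userspace call, as a sketch only (fd is assumed to be an open
 * compute device file descriptor; not part of the driver):
 *
 *	struct hl_info_hw_ip_info hw_ip = {0};
 *	struct hl_info_args info_args = {
 *		.return_pointer = (__u64) (uintptr_t) &hw_ip,
 *		.return_size = sizeof(hw_ip),
 *		.op = HL_INFO_HW_IP_INFO,
 *	};
 *
 *	rc = ioctl(fd, HL_IOCTL_INFO, &info_args);
 */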
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	case HL_INFO_DRAM_USAGE:
		return dram_usage_info(hpriv, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because the device is not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

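/*
 * The ioctl tables below are indexed by _IOC_NR() of the command, so each
 * handler sits at the slot matching its ioctl number. The control device
 * exposes only the INFO ioctl.
 */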
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};

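/*
 * _hl_ioctl() - common ioctl entry path.
 *
 * Payloads are staged in a zeroed 128-byte stack buffer when they fit,
 * otherwise in a heap allocation sized to the larger of the user's and the
 * driver's idea of the argument size. Copying only the user's size while
 * zero-filling the rest keeps old binaries working when the uAPI structure
 * grows.
 */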
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			  task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
		ioctl = &hl_ioctls[nr];
	} else {
		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}

long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}