// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt) "habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

/*
 * Per-opcode size of the user-supplied input structure for the DEBUG ioctl.
 * Ops with no input payload (FUNNEL, TIMESTAMP) are explicitly 0.
 * Indexed by the HL_DEBUG_OP_* values; used to clamp/copy user input in
 * hl_debug_ioctl()/debug_coresight().
 */
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0

};

/*
 * device_status_info() - report the current device status to user space.
 * Copies at most min(args->return_size, sizeof(dev_stat)) bytes.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

/*
 * hw_ip_info() - fill and return the static H/W IP description of the device.
 * SRAM/DRAM sizes are reported minus the regions reserved for the KMD
 * (everything below the *_user_base_address).
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	/* With a virtual DRAM the user sees the DMMU range, not physical DRAM */
	hw_ip.dram_base_address =
		hdev->mmu_enable && prop->dram_supports_virtual_memory ?
		prop->dmmu.start_addr : prop->dram_user_base_address;
	/* Legacy 8-bit mask kept for older user space; ext carries full mask */
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	/* When full MMU is on, expose only whole DRAM pages to user space */
	if (hdev->mmu_enable == MMU_EN_ALL)
		hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size,
				prop->dram_page_size) * prop->dram_page_size;
	else
		hw_ip.dram_size = dram_available_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;

	return copy_to_user(out, &hw_ip,
		min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

/*
 * hw_events_info() - copy the ASIC event statistics array to user space.
 * @aggregate: true for aggregated (since boot) stats, false for current.
 * Return: 0 on success, -EINVAL on bad args, -EOPNOTSUPP if the ASIC
 * does not provide event stats, -EFAULT on copy failure.
 */
static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

/*
 * events_info() - return and clear the pending notifier events mask.
 * Read-and-reset under the notifier lock, so each event is delivered once.
 * Return: 0 on success, -EINVAL if buffer too small, -EFAULT on copy failure.
 */
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

/*
 * dram_usage_info() - report free device DRAM and this context's DRAM usage.
 * Free memory excludes the KMD-reserved DRAM region.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	/* ctx may be NULL for a control device fd */
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

/*
 * hw_idle() - report whether the device engines are idle.
 * The legacy 32-bit busy mask mirrors the low word of the extended mask.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

/*
 * debug_coresight() - execute a coresight debug configuration request.
 * Marshals optional user input/output buffers around the ASIC-specific
 * debug_coresight callback. args->op was validated and input_size clamped
 * by the caller (hl_debug_ioctl), so hl_debug_struct_size[args->op] is safe.
 * Return: 0 on success, -ENOMEM/-EFAULT/callback error otherwise.
 */
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}


out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

/*
 * device_utilization() - report the device utilization percentage.
 * Return: 0 on success, -EINVAL on bad args or query failure,
 * -EFAULT on copy failure.
 */
static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return
		copy_to_user(out, &device_util,
			min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

/*
 * get_clk_rate() - report current and maximum clock rates (MHz) from FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
										? -EFAULT : 0;
}

/*
 * get_reset_count() - report hard and compute ("soft") reset counters.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	/* uAPI field name predates the hard/compute reset split */
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

/*
 * time_sync_info() - report paired device and host timestamps for
 * host/device clock synchronization.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

/*
 * pci_counters_info() - report PCI link counters obtained from CPU-CP FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

/*
 * clk_throttle_info() - report clock throttling reasons and, per throttle
 * type, the start timestamp and duration. A still-active throttle event
 * (end timestamp == 0) is measured up to "now".
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));

	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

/*
 * cs_counters_info() - report command-submission drop counters, both
 * device-aggregated and (when a compute context exists) per-context.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

/*
 * sync_manager_info() - report the first user-available sync object,
 * monitor and completion queue for the requested dcore.
 * Return: 0 on success, -EINVAL on bad args/dcore, -EFAULT on copy failure.
 */
static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

/*
 * total_energy_consumption_info() - report total energy consumption from FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

/*
 * pll_frequency_info() - report the output frequencies of the requested PLL.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
		min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

/*
 * power_info() - report current power consumption obtained from FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
		min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

/*
 * open_stats_info() - report device-open statistics: duration of the last
 * open session, open counter and compute-context state flags.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
		min((size_t) max_size, sizeof(open_stats_info))) ?
		-EFAULT : 0;
}

/*
 * dram_pending_rows_info() - report the number of DRAM rows pending
 * replacement, as reported by FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

/*
 * dram_replaced_rows_info() - report already-replaced DRAM (HBM) rows
 * information obtained from FW.
 * Return: 0 on success, negative errno from FW query or -EFAULT/-EINVAL.
 */
static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

/*
 * last_err_open_dev_info() - report the timestamp of the last successful
 * device open (ns). Used to correlate errors with the owning session.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

/*
 * cs_timeout_info() - report the sequence number and timestamp of the last
 * captured command-submission timeout.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

/*
 * razwi_info() - report the last captured RAZWI (read-as-zero/write-ignored)
 * event. Returns 0 with no data written when no event has been captured;
 * user space distinguishes this by the unchanged buffer contents.
 * NOTE(review): razwi data is read without a lock — assumed stable once
 * razwi_info_available is set; confirm against the capture path.
 * Return: 0 on success/no-event, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ?
			-EFAULT : 0;
}

/*
 * undefined_opcode_info() - report details of the last captured undefined
 * opcode error (engine, CQ address/size, stream and CB addresses).
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

/*
 * dev_mem_alloc_page_sizes_info() - report the bitmask of supported device
 * memory allocation page orders.
 * Return: 0 on success, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that will support multiple DRAM page sizes will support only "powers of 2"
	 * pages (unlike some of the ASICs before supporting multiple page sizes).
	 * For this reason for all ASICs that not support multiple page size the function will
	 * return an empty bitmask indicating that multiple page sizes is not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

/*
 * sec_attest_info() - fetch the secured attestation blob from FW and
 * repackage it (endianness-converted) into the uAPI structure.
 * Both buffers are heap-allocated because they are large for kernel stack.
 * Return: 0 on success, -ENOMEM/-EINVAL/-EFAULT or FW error otherwise.
 */
static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_sec_attest_info *sec_attest_info;
	struct hl_info_sec_attest *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
	if (!sec_attest_info)
		return -ENOMEM;

	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_sec_attest_info;
	}

	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
	if (rc)
		goto free_info;

	info->nonce = le32_to_cpu(sec_attest_info->nonce);
	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
	info->quote_sig_len = sec_attest_info->quote_sig_len;
	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));

	rc = copy_to_user(out, info,
				min_t(size_t, max_size, sizeof(*info))) ?
				-EFAULT : 0;

free_info:
	kfree(info);
free_sec_attest_info:
	kfree(sec_attest_info);

	return rc;
}

/*
 * eventfd_register() - attach an eventfd to this file's notifier so the
 * driver can signal asynchronous device events. Only one eventfd may be
 * registered per open file descriptor.
 * Return: 0 on success, -EINVAL if already registered, or eventfd error.
 */
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if there is already a registered on that process */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		/* reset so a later register attempt can succeed */
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

/*
 * eventfd_unregister() - drop the eventfd previously registered on this fd.
 * Return: 0 on success, -EINVAL if none is registered.
 */
static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

/*
 * engine_status_info() - fill a user buffer with a textual dump of all
 * engines' status. The buffer must be between 1KB and
 * HL_ENGINES_DATA_MAX_SIZE; the actual generated size is returned via
 * args->user_buffer_actual_size.
 * Return: 0 on success, -EINVAL/-ENOMEM/-EFAULT otherwise.
 */
static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
				-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

/*
 * page_fault_info() - report the last captured page-fault event, if any.
 * Returns 0 without writing anything when no fault has been captured.
 * Return: 0 on success/no-event, -EINVAL on bad args, -EFAULT on copy failure.
 */
static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}

/*
 * user_mappings_info() - copy the user mappings captured at the last page
 * fault. The number of entries is returned via args->array_size so user
 * space can retry with a large-enough buffer on -ENOMEM.
 * Return: 0 on success/no-event, -EINVAL/-ENOMEM/-EFAULT otherwise.
 */
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

/*
 * hw_err_info() - report the last captured H/W error event.
 * NOTE(review): event data is read without a lock — assumed stable once
 * event_info_available is set; confirm against the capture path.
 * Return: 0 on success, -EINVAL on bad args, -ENOMEM if buffer too small,
 * -ENOENT when no event was captured, -EFAULT on copy failure.
 */
static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

/*
 * fw_err_info() - report the last captured FW error event.
 * Same contract and locking caveat as hw_err_info().
 * Return: 0 on success, -EINVAL/-ENOMEM/-ENOENT/-EFAULT otherwise.
 */
static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if ((!user_buf_size) || (!user_buf))
		return -EINVAL;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return -ENOENT;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ?
			-EFAULT : 0;
}

/*
 * send_fw_generic_request() - forward a generic passthrough request to FW.
 * The user buffer (max 1MB) is bounced through a CPU-accessible DMA pool
 * buffer; currently only HL_PASSTHROUGH_VERSIONS (no input payload) is
 * accepted. On success, up to min(FW response size, return_size) bytes are
 * copied back to user space.
 * Return: 0 on success, -EINVAL/-ENOMEM/-EFAULT or FW error otherwise.
 */
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;


	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	/* FW may shrink 'size' to the actual response length */
	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	/* free with the original allocation size, not the FW-updated one */
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}

/*
 * _hl_info_ioctl() - dispatch an INFO ioctl to the matching query helper.
 * The first switch handles opcodes that are served even when the device is
 * disabled or in reset; the remaining opcodes require an operational device
 * (otherwise -EBUSY). @dev selects which device node (compute or control)
 * is used for log messages.
 */
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);


	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

/* INFO ioctl entry point for the compute device node */
static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

/* INFO ioctl entry point for the control device node */
static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

/*
 * hl_debug_ioctl() - handle the DEBUG ioctl.
 * Coresight configuration ops are rejected unless the device was first put
 * in debug mode (HL_DEBUG_OP_SET_MODE); the user input size is clamped to
 * the per-op structure size before use.
 */
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;

	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

/* Map an ioctl command number to its handler in the dispatch tables below */
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

/* Dispatch table for the compute device node */
static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

/* Dispatch table for the control device node - INFO only */
static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};

static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t
*func; 1156 u32 hl_size; 1157 int retcode; 1158 1159 /* Do not trust userspace, use our own definition */ 1160 func = ioctl->func; 1161 1162 if (unlikely(!func)) { 1163 dev_dbg(dev, "no function\n"); 1164 retcode = -ENOTTY; 1165 goto out_err; 1166 } 1167 1168 hl_size = _IOC_SIZE(ioctl->cmd); 1169 usize = asize = _IOC_SIZE(cmd); 1170 if (hl_size > asize) 1171 asize = hl_size; 1172 1173 cmd = ioctl->cmd; 1174 1175 if (cmd & (IOC_IN | IOC_OUT)) { 1176 if (asize <= sizeof(stack_kdata)) { 1177 kdata = stack_kdata; 1178 } else { 1179 kdata = kzalloc(asize, GFP_KERNEL); 1180 if (!kdata) { 1181 retcode = -ENOMEM; 1182 goto out_err; 1183 } 1184 } 1185 } 1186 1187 if (cmd & IOC_IN) { 1188 if (copy_from_user(kdata, (void __user *)arg, usize)) { 1189 retcode = -EFAULT; 1190 goto out_err; 1191 } 1192 } 1193 1194 retcode = func(hpriv, kdata); 1195 1196 if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize)) 1197 retcode = -EFAULT; 1198 1199 out_err: 1200 if (retcode) 1201 dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", 1202 task_pid_nr(current), cmd, nr); 1203 1204 if (kdata != stack_kdata) 1205 kfree(kdata); 1206 1207 return retcode; 1208 } 1209 1210 long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) 1211 { 1212 struct hl_fpriv *hpriv = filep->private_data; 1213 struct hl_device *hdev = hpriv->hdev; 1214 const struct hl_ioctl_desc *ioctl = NULL; 1215 unsigned int nr = _IOC_NR(cmd); 1216 1217 if (!hdev) { 1218 pr_err_ratelimited("Sending ioctl after device was removed! 
Please close FD\n"); 1219 return -ENODEV; 1220 } 1221 1222 if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) { 1223 ioctl = &hl_ioctls[nr]; 1224 } else { 1225 dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n", 1226 task_pid_nr(current), nr); 1227 return -ENOTTY; 1228 } 1229 1230 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev); 1231 } 1232 1233 long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg) 1234 { 1235 struct hl_fpriv *hpriv = filep->private_data; 1236 struct hl_device *hdev = hpriv->hdev; 1237 const struct hl_ioctl_desc *ioctl = NULL; 1238 unsigned int nr = _IOC_NR(cmd); 1239 1240 if (!hdev) { 1241 pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n"); 1242 return -ENODEV; 1243 } 1244 1245 if (nr == _IOC_NR(HL_IOCTL_INFO)) { 1246 ioctl = &hl_ioctls_control[nr]; 1247 } else { 1248 dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n", 1249 task_pid_nr(current), nr); 1250 return -ENOTTY; 1251 } 1252 1253 return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl); 1254 } 1255