// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#define pr_fmt(fmt)	"habanalabs: " fmt

#include <uapi/drm/habanalabs_accel.h>
#include "habanalabs.h"

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};

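/*
 * Every INFO handler below follows the same contract: userspace passes an
 * output buffer through args->return_pointer/args->return_size and the
 * handler copies back min(return_size, sizeof(<reply struct>)) bytes, so
 * older binaries keep working when a reply struct grows.
 *
 * Illustrative userspace sketch (assumes an open device fd and the uapi
 * header; not part of this driver):
 *
 *	struct hl_info_device_status dev_stat = {};
 *	struct hl_info_args args = {
 *		.return_pointer = (__u64) (uintptr_t) &dev_stat,
 *		.return_size = sizeof(dev_stat),
 *		.op = HL_INFO_DEVICE_STATUS,
 *	};
 *
 *	if (ioctl(fd, HL_IOCTL_INFO, &args))
 *		perror("HL_IOCTL_INFO");
 */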
static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size, dram_available_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address =
			prop->dram_supports_virtual_memory ?
			prop->dmmu.start_addr : prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask & 0xFF;
	hw_ip.tpc_enabled_mask_ext = prop->tpc_enabled_mask;

	hw_ip.sram_size = prop->sram_size - sram_kmd_size;

	dram_available_size = prop->dram_size - dram_kmd_size;

	hw_ip.dram_size = DIV_ROUND_DOWN_ULL(dram_available_size, prop->dram_page_size) *
				prop->dram_page_size;

	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;

	hw_ip.dram_page_size = prop->dram_page_size;
	hw_ip.device_mem_alloc_default_page_size = prop->device_mem_alloc_default_page_size;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	hw_ip.decoder_enabled_mask = prop->decoder_enabled_mask;
	hw_ip.mme_master_slave_mode = prop->mme_master_slave_mode;
	hw_ip.first_available_interrupt_id = prop->first_available_user_interrupt;
	hw_ip.number_of_user_interrupts = prop->user_interrupt_count;
	hw_ip.tpc_interrupt_id = prop->tpc_interrupt_id;

	hw_ip.edma_enabled_mask = prop->edma_enabled_mask;
	hw_ip.server_type = prop->server_type;
	hw_ip.security_enabled = prop->fw_security_enabled;
	hw_ip.revision_id = hdev->pdev->revision;
	hw_ip.rotator_enabled_mask = prop->rotator_enabled_mask;
	hw_ip.engine_core_interrupt_reg_addr = prop->engine_core_interrupt_reg_addr;
	hw_ip.reserved_dram_size = dram_kmd_size;

	return copy_to_user(out, &hw_ip,
			min((size_t) size, sizeof(hw_ip))) ? -EFAULT : 0;
}

static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);
	if (!arr) {
		dev_err(hdev->dev, "Events info not supported\n");
		return -EOPNOTSUPP;
	}

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

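/*
 * Reading the pending events mask is destructive: the mask is returned and
 * then cleared under notifier_event.lock, so each event is reported to
 * userspace exactly once per file descriptor.
 */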
static int events_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	u32 max_size = args->return_size;
	u64 events_mask;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((max_size < sizeof(u64)) || (!out))
		return -EINVAL;

	mutex_lock(&hpriv->notifier_event.lock);
	events_mask = hpriv->notifier_event.events_mask;
	hpriv->notifier_event.events_mask = 0;
	mutex_unlock(&hpriv->notifier_event.lock);

	return copy_to_user(out, &events_mask, sizeof(u64)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
			min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					hw_idle.busy_engines_mask_ext,
					HL_BUSY_ENGINES_MASK_EXT_SIZE, NULL);
	hw_idle.busy_engines_mask =
			lower_32_bits(hw_idle.busy_engines_mask_ext[0]);

	return copy_to_user(out, &hw_idle,
			min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

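/*
 * Note on sizing: the input bounce buffer below is allocated with the per-op
 * size from hl_debug_struct_size[], while the copy_from_user() length comes
 * from args->input_size. The caller (hl_debug_ioctl()) clamps input_size to
 * the per-op size first, which is what keeps that copy in bounds.
 */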
static int debug_coresight(struct hl_device *hdev, struct hl_ctx *ctx, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, ctx, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_device_utilization(hdev, &device_util.utilization);
	if (rc)
		return -EINVAL;

	return copy_to_user(out, &device_util,
			min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz, &clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate, min_t(size_t, max_size, sizeof(clk_rate)))
			? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->reset_info.hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->reset_info.compute_reset_cnt;

	return copy_to_user(out, &reset_count,
			min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
			min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
			min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

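/*
 * For every throttling reason seen since boot, report when it started and
 * how long it lasted. A reason that is still active has a zero end
 * timestamp, in which case the duration is measured against the current
 * time.
 */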
static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	ktime_t end_time, zero_time = ktime_set(0, 0);
	u32 max_size = args->return_size;
	int i;

	if ((!max_size) || (!out))
		return -EINVAL;

	mutex_lock(&hdev->clk_throttling.lock);

	clk_throttle.clk_throttling_reason = hdev->clk_throttling.current_reason;

	for (i = 0 ; i < HL_CLK_THROTTLE_TYPE_MAX ; i++) {
		if (!(hdev->clk_throttling.aggregated_reason & BIT(i)))
			continue;

		clk_throttle.clk_throttling_timestamp_us[i] =
			ktime_to_us(hdev->clk_throttling.timestamp[i].start);

		if (ktime_compare(hdev->clk_throttling.timestamp[i].end, zero_time))
			end_time = hdev->clk_throttling.timestamp[i].end;
		else
			end_time = ktime_get();

		clk_throttle.clk_throttling_duration_ns[i] =
			ktime_to_ns(ktime_sub(end_time,
				hdev->clk_throttling.timestamp[i].start));
	}
	mutex_unlock(&hdev->clk_throttling.lock);

	return copy_to_user(out, &clk_throttle,
			min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_cs_counters cs_counters = {0};
	struct hl_device *hdev = hpriv->hdev;
	struct hl_cs_counters_atomic *cntr;
	u32 max_size = args->return_size;

	cntr = &hdev->aggregated_cs_counters;

	if ((!max_size) || (!out))
		return -EINVAL;

	cs_counters.total_out_of_mem_drop_cnt =
			atomic64_read(&cntr->out_of_mem_drop_cnt);
	cs_counters.total_parsing_drop_cnt =
			atomic64_read(&cntr->parsing_drop_cnt);
	cs_counters.total_queue_full_drop_cnt =
			atomic64_read(&cntr->queue_full_drop_cnt);
	cs_counters.total_device_in_reset_drop_cnt =
			atomic64_read(&cntr->device_in_reset_drop_cnt);
	cs_counters.total_max_cs_in_flight_drop_cnt =
			atomic64_read(&cntr->max_cs_in_flight_drop_cnt);
	cs_counters.total_validation_drop_cnt =
			atomic64_read(&cntr->validation_drop_cnt);

	if (hpriv->ctx) {
		cs_counters.ctx_out_of_mem_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.out_of_mem_drop_cnt);
		cs_counters.ctx_parsing_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.parsing_drop_cnt);
		cs_counters.ctx_queue_full_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.queue_full_drop_cnt);
		cs_counters.ctx_device_in_reset_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.device_in_reset_drop_cnt);
		cs_counters.ctx_max_cs_in_flight_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.max_cs_in_flight_drop_cnt);
		cs_counters.ctx_validation_drop_cnt =
				atomic64_read(
					&hpriv->ctx->cs_counters.validation_drop_cnt);
	}

	return copy_to_user(out, &cs_counters,
			min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

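/*
 * Reports the first sync object, monitor and completion queue indices on the
 * given dcore that are free for userspace use; indices below these are in
 * kernel/firmware use.
 */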
static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];
	sm_info.first_available_cq =
			prop->first_available_cq[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
			min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

static int pll_frequency_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_pll_frequency_info freq_info = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pll_info_get(hdev, args->pll_index, freq_info.output);
	if (rc)
		return rc;

	return copy_to_user(out, &freq_info,
			min((size_t) max_size, sizeof(freq_info))) ? -EFAULT : 0;
}

static int power_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_power_info power_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_power_get(hdev, &power_info.power);
	if (rc)
		return rc;

	return copy_to_user(out, &power_info,
			min((size_t) max_size, sizeof(power_info))) ? -EFAULT : 0;
}

static int open_stats_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_open_stats_info open_stats_info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	open_stats_info.last_open_period_ms = jiffies64_to_msecs(
		hdev->last_open_session_duration_jif);
	open_stats_info.open_counter = hdev->open_counter;
	open_stats_info.is_compute_ctx_active = hdev->is_compute_ctx_active;
	open_stats_info.compute_ctx_in_release = hdev->compute_ctx_in_release;

	return copy_to_user(out, &open_stats_info,
			min((size_t) max_size, sizeof(open_stats_info))) ? -EFAULT : 0;
}

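/*
 * HBM row-repair queries: both counters below are maintained by the
 * firmware, which tracks rows that are pending replacement and rows that
 * have already been replaced.
 */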
static int dram_pending_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	u32 pend_rows_num = 0;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_pending_row_get(hdev, &pend_rows_num);
	if (rc)
		return rc;

	return copy_to_user(out, &pend_rows_num,
			min_t(size_t, max_size, sizeof(pend_rows_num))) ? -EFAULT : 0;
}

static int dram_replaced_rows_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct cpucp_hbm_row_info info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_dram_replaced_row_get(hdev, &info);
	if (rc)
		return rc;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ?
			-EFAULT : 0;
}

static int last_err_open_dev_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_last_err_open_dev_time info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->last_successful_open_ktime);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int cs_timeout_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_info_cs_timeout_event info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.seq = hdev->captured_err_info.cs_timeout.seq;
	info.timestamp = ktime_to_ns(hdev->captured_err_info.cs_timeout.timestamp);

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int razwi_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct razwi_info *razwi_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	razwi_info = &hdev->captured_err_info.razwi_info;
	if (!razwi_info->razwi_info_available)
		return 0;

	return copy_to_user(out, &razwi_info->razwi,
			min_t(size_t, max_size, sizeof(struct hl_info_razwi_event))) ? -EFAULT : 0;
}

static int undefined_opcode_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct hl_info_undefined_opcode_event info = {0};
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	info.timestamp = ktime_to_ns(hdev->captured_err_info.undef_opcode.timestamp);
	info.engine_id = hdev->captured_err_info.undef_opcode.engine_id;
	info.cq_addr = hdev->captured_err_info.undef_opcode.cq_addr;
	info.cq_size = hdev->captured_err_info.undef_opcode.cq_size;
	info.stream_id = hdev->captured_err_info.undef_opcode.stream_id;
	info.cb_addr_streams_len = hdev->captured_err_info.undef_opcode.cb_addr_streams_len;
	memcpy(info.cb_addr_streams, hdev->captured_err_info.undef_opcode.cb_addr_streams,
			sizeof(info.cb_addr_streams));

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

static int dev_mem_alloc_page_sizes_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_info_dev_memalloc_page_sizes info = {0};
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	/*
	 * Future ASICs that support multiple DRAM page sizes will support only "power of 2"
	 * page sizes (unlike some earlier ASICs that already support multiple page sizes).
	 * For that reason, on any ASIC that does not support multiple page sizes, this
	 * function returns an empty bitmask to indicate that the feature is not supported.
	 */
	info.page_order_bitmask = hdev->asic_prop.dmmu.supported_pages_mask;

	return copy_to_user(out, &info, min_t(size_t, max_size, sizeof(info))) ? -EFAULT : 0;
}

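/*
 * The firmware reply and the uapi reply are both sizable blobs (PCR quotes,
 * certificate, signature), so they are heap-allocated here rather than kept
 * on the kernel stack. Firmware fields arrive little-endian and are
 * converted before being handed to userspace.
 */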
static int sec_attest_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct cpucp_sec_attest_info *sec_attest_info;
	struct hl_info_sec_attest *info;
	u32 max_size = args->return_size;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	sec_attest_info = kmalloc(sizeof(*sec_attest_info), GFP_KERNEL);
	if (!sec_attest_info)
		return -ENOMEM;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		rc = -ENOMEM;
		goto free_sec_attest_info;
	}

	rc = hl_fw_get_sec_attest_info(hpriv->hdev, sec_attest_info, args->sec_attest_nonce);
	if (rc)
		goto free_info;

	info->nonce = le32_to_cpu(sec_attest_info->nonce);
	info->pcr_quote_len = le16_to_cpu(sec_attest_info->pcr_quote_len);
	info->pub_data_len = le16_to_cpu(sec_attest_info->pub_data_len);
	info->certificate_len = le16_to_cpu(sec_attest_info->certificate_len);
	info->pcr_num_reg = sec_attest_info->pcr_num_reg;
	info->pcr_reg_len = sec_attest_info->pcr_reg_len;
	info->quote_sig_len = sec_attest_info->quote_sig_len;
	memcpy(&info->pcr_data, &sec_attest_info->pcr_data, sizeof(info->pcr_data));
	memcpy(&info->pcr_quote, &sec_attest_info->pcr_quote, sizeof(info->pcr_quote));
	memcpy(&info->public_data, &sec_attest_info->public_data, sizeof(info->public_data));
	memcpy(&info->certificate, &sec_attest_info->certificate, sizeof(info->certificate));
	memcpy(&info->quote_sig, &sec_attest_info->quote_sig, sizeof(info->quote_sig));

	rc = copy_to_user(out, info,
			min_t(size_t, max_size, sizeof(*info))) ? -EFAULT : 0;

free_info:
	kfree(info);
free_sec_attest_info:
	kfree(sec_attest_info);

	return rc;
}

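/*
 * At most one eventfd can be registered per file descriptor; both register
 * and unregister take notifier_event.lock so they do not race with event
 * delivery or with each other.
 */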
static int eventfd_register(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	int rc;

	/* check if an eventfd is already registered for this process */
	mutex_lock(&hpriv->notifier_event.lock);
	if (hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	hpriv->notifier_event.eventfd = eventfd_ctx_fdget(args->eventfd);
	if (IS_ERR(hpriv->notifier_event.eventfd)) {
		rc = PTR_ERR(hpriv->notifier_event.eventfd);
		hpriv->notifier_event.eventfd = NULL;
		mutex_unlock(&hpriv->notifier_event.lock);
		return rc;
	}

	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int eventfd_unregister(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	mutex_lock(&hpriv->notifier_event.lock);
	if (!hpriv->notifier_event.eventfd) {
		mutex_unlock(&hpriv->notifier_event.lock);
		return -EINVAL;
	}

	eventfd_ctx_put(hpriv->notifier_event.eventfd);
	hpriv->notifier_event.eventfd = NULL;
	mutex_unlock(&hpriv->notifier_event.lock);
	return 0;
}

static int engine_status_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 status_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct engines_data eng_data;
	int rc;

	if ((status_buf_size < SZ_1K) || (status_buf_size > HL_ENGINES_DATA_MAX_SIZE) || (!out))
		return -EINVAL;

	eng_data.actual_size = 0;
	eng_data.allocated_buf_size = status_buf_size;
	eng_data.buf = vmalloc(status_buf_size);
	if (!eng_data.buf)
		return -ENOMEM;

	hdev->asic_funcs->is_device_idle(hdev, NULL, 0, &eng_data);

	if (eng_data.actual_size > eng_data.allocated_buf_size) {
		dev_err(hdev->dev,
			"Engines data size (%d Bytes) is bigger than allocated size (%u Bytes)\n",
			eng_data.actual_size, status_buf_size);
		vfree(eng_data.buf);
		return -ENOMEM;
	}

	args->user_buffer_actual_size = eng_data.actual_size;
	rc = copy_to_user(out, eng_data.buf, min_t(size_t, status_buf_size, eng_data.actual_size)) ?
			-EFAULT : 0;

	vfree(eng_data.buf);

	return rc;
}

static int page_fault_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 max_size = args->return_size;
	struct page_fault_info *pgf_info;

	if ((!max_size) || (!out))
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	return copy_to_user(out, &pgf_info->page_fault,
			min_t(size_t, max_size, sizeof(struct hl_page_fault_info))) ? -EFAULT : 0;
}

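/*
 * Copies the user mappings captured together with the last page fault.
 * args->array_size is written back even when the buffer is too small (the
 * ioctl layer copies the args struct back to userspace regardless of the
 * handler's return code), so a caller that gets -ENOMEM can allocate
 * num_of_user_mappings entries and retry.
 */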
static int user_mappings_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	u32 user_buf_size = args->return_size;
	struct hl_device *hdev = hpriv->hdev;
	struct page_fault_info *pgf_info;
	u64 actual_size;

	if (!out)
		return -EINVAL;

	pgf_info = &hdev->captured_err_info.page_fault_info;
	if (!pgf_info->page_fault_info_available)
		return 0;

	args->array_size = pgf_info->num_of_user_mappings;

	actual_size = pgf_info->num_of_user_mappings * sizeof(struct hl_user_mapping);
	if (user_buf_size < actual_size)
		return -ENOMEM;

	return copy_to_user(out, pgf_info->user_mappings, actual_size) ? -EFAULT : 0;
}

static int hw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct hw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.hw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_hw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_hw_err_event));
	return rc ? -EFAULT : 0;
}

static int fw_err_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	void __user *user_buf = (void __user *) (uintptr_t) args->return_pointer;
	struct hl_device *hdev = hpriv->hdev;
	u32 user_buf_size = args->return_size;
	struct fw_err_info *info;
	int rc;

	if (!user_buf)
		return -EINVAL;

	info = &hdev->captured_err_info.fw_err;
	if (!info->event_info_available)
		return 0;

	if (user_buf_size < sizeof(struct hl_info_fw_err_event))
		return -ENOMEM;

	rc = copy_to_user(user_buf, &info->event, sizeof(struct hl_info_fw_err_event));
	return rc ? -EFAULT : 0;
}

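/*
 * Generic firmware passthrough: the request/reply travels through a bounce
 * buffer in the CPU-accessible DMA pool, capped at 1MB. The firmware may
 * update 'size' to the actual reply length, and only min(size, return_size)
 * bytes are copied back to userspace.
 */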
static int send_fw_generic_request(struct hl_device *hdev, struct hl_info_args *info_args)
{
	void __user *buff = (void __user *) (uintptr_t) info_args->return_pointer;
	u32 size = info_args->return_size;
	dma_addr_t dma_handle;
	bool need_input_buff;
	void *fw_buff;
	int rc = 0;

	switch (info_args->fw_sub_opcode) {
	case HL_PASSTHROUGH_VERSIONS:
		need_input_buff = false;
		break;
	default:
		return -EINVAL;
	}

	if (size > SZ_1M) {
		dev_err(hdev->dev, "buffer size cannot exceed 1MB\n");
		return -EINVAL;
	}

	fw_buff = hl_cpu_accessible_dma_pool_alloc(hdev, size, &dma_handle);
	if (!fw_buff)
		return -ENOMEM;

	if (need_input_buff && copy_from_user(fw_buff, buff, size)) {
		dev_dbg(hdev->dev, "Failed to copy from user FW buff\n");
		rc = -EFAULT;
		goto free_buff;
	}

	rc = hl_fw_send_generic_request(hdev, info_args->fw_sub_opcode, dma_handle, &size);
	if (rc)
		goto free_buff;

	if (copy_to_user(buff, fw_buff, min(size, info_args->return_size))) {
		dev_dbg(hdev->dev, "Failed to copy to user FW generic req output\n");
		rc = -EFAULT;
	}

free_buff:
	hl_cpu_accessible_dma_pool_free(hdev, info_args->return_size, fw_buff);

	return rc;
}

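/*
 * INFO dispatch is two-staged: opcodes in the first switch are served even
 * while the device is disabled or in reset; everything else requires an
 * operational device and fails with -EBUSY otherwise.
 */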
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	enum hl_device_status status;
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	if (args->pad) {
		dev_dbg(hdev->dev, "Padding bytes must be 0\n");
		return -EINVAL;
	}

	/*
	 * Information is returned for the following opcodes even if the device
	 * is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	case HL_INFO_HW_EVENTS:
		return hw_events_info(hdev, false, args);

	case HL_INFO_HW_EVENTS_AGGREGATE:
		return hw_events_info(hdev, true, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_OPEN_STATS:
		return open_stats_info(hpriv, args);

	case HL_INFO_LAST_ERR_OPEN_DEV_TIME:
		return last_err_open_dev_info(hpriv, args);

	case HL_INFO_CS_TIMEOUT_EVENT:
		return cs_timeout_info(hpriv, args);

	case HL_INFO_RAZWI_EVENT:
		return razwi_info(hpriv, args);

	case HL_INFO_UNDEFINED_OPCODE_EVENT:
		return undefined_opcode_info(hpriv, args);

	case HL_INFO_DEV_MEM_ALLOC_PAGE_SIZES:
		return dev_mem_alloc_page_sizes_info(hpriv, args);

	case HL_INFO_GET_EVENTS:
		return events_info(hpriv, args);

	case HL_INFO_PAGE_FAULT_EVENT:
		return page_fault_info(hpriv, args);

	case HL_INFO_USER_MAPPINGS:
		return user_mappings_info(hpriv, args);

	case HL_INFO_UNREGISTER_EVENTFD:
		return eventfd_unregister(hpriv, args);

	case HL_INFO_HW_ERR_EVENT:
		return hw_err_info(hpriv, args);

	case HL_INFO_FW_ERR_EVENT:
		return fw_err_info(hpriv, args);

	case HL_INFO_DRAM_USAGE:
		return dram_usage_info(hpriv, args);
	default:
		break;
	}

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	case HL_INFO_PLL_FREQUENCY:
		return pll_frequency_info(hpriv, args);

	case HL_INFO_POWER:
		return power_info(hpriv, args);

	case HL_INFO_DRAM_REPLACED_ROWS:
		return dram_replaced_rows_info(hpriv, args);

	case HL_INFO_DRAM_PENDING_ROWS:
		return dram_pending_rows_info(hpriv, args);

	case HL_INFO_SECURED_ATTESTATION:
		return sec_attest_info(hpriv, args);

	case HL_INFO_REGISTER_EVENTFD:
		return eventfd_register(hpriv, args);

	case HL_INFO_ENGINE_STATUS:
		return engine_status_info(hpriv, args);

	case HL_INFO_FW_GENERIC_REQ:
		return send_fw_generic_request(hdev, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

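/*
 * Coresight-style debug ops are only honored after the device has been put
 * into debug mode via HL_DEBUG_OP_SET_MODE; input_size is clamped to the
 * op's expected struct size before the op is forwarded.
 */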
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	enum hl_device_status status;
	int rc = 0;

	if (!hl_device_operational(hdev, &status)) {
		dev_dbg_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			hdev->status[status]);
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size = min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, hpriv->ctx, args);
		break;

	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, hpriv->ctx, (bool) args->enable);
		break;

	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -EINVAL;
		break;
	}

	return rc;
}

#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};

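/*
 * Argument marshaling for all habanalabs ioctls. Small argument structs are
 * staged in a 128-byte stack buffer, larger ones are heap-allocated. The
 * buffer is sized to max(user size, kernel size) and zero-initialized, so if
 * userspace passes an older, shorter struct the kernel-side tail reads as
 * zeroes, and only the user's size is copied in and back out.
 */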
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	/* Do not trust userspace, use our own definition */
	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg_ratelimited(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
				task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
		ioctl = &hl_ioctls[nr];
	} else {
		dev_dbg_ratelimited(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
				task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}

long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (!hdev) {
		pr_err_ratelimited("Sending ioctl after device was removed! Please close FD\n");
		return -ENODEV;
	}

	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_dbg_ratelimited(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
				task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}