1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2020-2023 Intel Corporation 4 */ 5 6 #include <linux/firmware.h> 7 #include <linux/module.h> 8 #include <linux/pci.h> 9 10 #include <drm/drm_accel.h> 11 #include <drm/drm_file.h> 12 #include <drm/drm_gem.h> 13 #include <drm/drm_ioctl.h> 14 #include <drm/drm_prime.h> 15 16 #include "vpu_boot_api.h" 17 #include "ivpu_drv.h" 18 #include "ivpu_fw.h" 19 #include "ivpu_gem.h" 20 #include "ivpu_hw.h" 21 #include "ivpu_ipc.h" 22 #include "ivpu_job.h" 23 #include "ivpu_jsm_msg.h" 24 #include "ivpu_mmu.h" 25 #include "ivpu_mmu_context.h" 26 #include "ivpu_pm.h" 27 28 #ifndef DRIVER_VERSION_STR 29 #define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \ 30 __stringify(DRM_IVPU_DRIVER_MINOR) "." 31 #endif 32 33 static const struct drm_driver driver; 34 35 static struct lock_class_key submitted_jobs_xa_lock_class_key; 36 37 int ivpu_dbg_mask; 38 module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644); 39 MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros."); 40 41 int ivpu_test_mode; 42 module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644); 43 MODULE_PARM_DESC(test_mode, "Test mode: 0 - normal operation, 1 - fw unit test, 2 - null hw"); 44 45 u8 ivpu_pll_min_ratio; 46 module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644); 47 MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency"); 48 49 u8 ivpu_pll_max_ratio = U8_MAX; 50 module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644); 51 MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency"); 52 53 bool ivpu_disable_mmu_cont_pages; 54 module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644); 55 MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization"); 56 57 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv) 58 { 59 struct ivpu_device *vdev = file_priv->vdev; 60 61 kref_get(&file_priv->ref); 62 63 ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n", 64 file_priv->ctx.id, kref_read(&file_priv->ref)); 65 66 return file_priv; 67 } 68 69 struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id) 70 { 71 struct ivpu_file_priv *file_priv; 72 73 xa_lock_irq(&vdev->context_xa); 74 file_priv = xa_load(&vdev->context_xa, id); 75 /* file_priv may still be in context_xa during file_priv_release() */ 76 if (file_priv && !kref_get_unless_zero(&file_priv->ref)) 77 file_priv = NULL; 78 xa_unlock_irq(&vdev->context_xa); 79 80 if (file_priv) 81 ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n", 82 file_priv->ctx.id, kref_read(&file_priv->ref)); 83 84 return file_priv; 85 } 86 87 static void file_priv_release(struct kref *ref) 88 { 89 struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref); 90 struct ivpu_device *vdev = file_priv->vdev; 91 92 ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id); 93 94 ivpu_cmdq_release_all(file_priv); 95 ivpu_bo_remove_all_bos_from_context(&file_priv->ctx); 96 ivpu_jsm_context_release(vdev, file_priv->ctx.id); 97 ivpu_mmu_user_context_fini(vdev, &file_priv->ctx); 98 drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv); 99 mutex_destroy(&file_priv->lock); 100 kfree(file_priv); 101 } 102 103 void ivpu_file_priv_put(struct ivpu_file_priv **link) 104 { 105 struct ivpu_file_priv *file_priv = *link; 106 struct ivpu_device *vdev = file_priv->vdev; 107 108 drm_WARN_ON(&vdev->drm, !file_priv); 109 110 ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n", 111 file_priv->ctx.id, kref_read(&file_priv->ref)); 112 113 *link = NULL; 114 kref_put(&file_priv->ref, file_priv_release); 115 } 116 117 static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 118 { 119 struct ivpu_file_priv *file_priv = file->driver_priv; 120 struct ivpu_device *vdev = file_priv->vdev; 121 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); 122 struct drm_ivpu_param *args = data; 123 int ret = 0; 124 int idx; 125 126 if (!drm_dev_enter(dev, &idx)) 127 return -ENODEV; 128 129 switch (args->param) { 130 case DRM_IVPU_PARAM_DEVICE_ID: 131 args->value = pdev->device; 132 break; 133 case DRM_IVPU_PARAM_DEVICE_REVISION: 134 args->value = pdev->revision; 135 break; 136 case DRM_IVPU_PARAM_PLATFORM_TYPE: 137 args->value = vdev->platform; 138 break; 139 case DRM_IVPU_PARAM_CORE_CLOCK_RATE: 140 args->value = ivpu_hw_reg_pll_freq_get(vdev); 141 break; 142 case DRM_IVPU_PARAM_NUM_CONTEXTS: 143 args->value = ivpu_get_context_count(vdev); 144 break; 145 case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS: 146 args->value = vdev->hw->ranges.user_low.start; 147 break; 148 case DRM_IVPU_PARAM_CONTEXT_PRIORITY: 149 args->value = file_priv->priority; 150 break; 151 case DRM_IVPU_PARAM_CONTEXT_ID: 152 args->value = file_priv->ctx.id; 153 break; 154 case DRM_IVPU_PARAM_FW_API_VERSION: 155 if (args->index < VPU_FW_API_VER_NUM) { 156 struct vpu_firmware_header *fw_hdr; 157 158 fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data; 159 args->value = fw_hdr->api_version[args->index]; 160 } else { 161 ret = -EINVAL; 162 } 163 break; 164 case DRM_IVPU_PARAM_ENGINE_HEARTBEAT: 165 ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value); 166 break; 167 case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID: 168 args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter); 169 break; 170 case DRM_IVPU_PARAM_TILE_CONFIG: 171 args->value = vdev->hw->tile_fuse; 172 break; 173 case DRM_IVPU_PARAM_SKU: 174 args->value = vdev->hw->sku; 175 break; 176 default: 177 ret = -EINVAL; 178 break; 179 } 180 181 drm_dev_exit(idx); 182 return ret; 183 } 184 185 static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 186 { 187 struct ivpu_file_priv *file_priv = file->driver_priv; 188 struct drm_ivpu_param *args = data; 189 int ret = 0; 190 191 switch (args->param) { 192 case DRM_IVPU_PARAM_CONTEXT_PRIORITY: 193 if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME) 194 file_priv->priority = args->value; 195 else 196 ret = -EINVAL; 197 break; 198 default: 199 ret = -EINVAL; 200 } 201 202 return ret; 203 } 204 205 static int ivpu_open(struct drm_device *dev, struct drm_file *file) 206 { 207 struct ivpu_device *vdev = to_ivpu_device(dev); 208 struct ivpu_file_priv *file_priv; 209 u32 ctx_id; 210 void *old; 211 int ret; 212 213 ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL); 214 if (ret) { 215 ivpu_err(vdev, "Failed to allocate context id: %d\n", ret); 216 return ret; 217 } 218 219 file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL); 220 if (!file_priv) { 221 ret = -ENOMEM; 222 goto err_xa_erase; 223 } 224 225 file_priv->vdev = vdev; 226 file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL; 227 kref_init(&file_priv->ref); 228 mutex_init(&file_priv->lock); 229 230 ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id); 231 if (ret) 232 goto err_mutex_destroy; 233 234 old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL); 235 if (xa_is_err(old)) { 236 ret = xa_err(old); 237 ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret); 238 goto err_ctx_fini; 239 } 240 241 ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n", 242 ctx_id, current->comm, task_pid_nr(current)); 243 244 file->driver_priv = file_priv; 245 return 0; 246 247 err_ctx_fini: 248 ivpu_mmu_user_context_fini(vdev, &file_priv->ctx); 249 err_mutex_destroy: 250 mutex_destroy(&file_priv->lock); 251 kfree(file_priv); 252 err_xa_erase: 253 xa_erase_irq(&vdev->context_xa, ctx_id); 254 return ret; 255 } 256 257 static void ivpu_postclose(struct drm_device *dev, struct drm_file *file) 258 { 259 struct ivpu_file_priv *file_priv = file->driver_priv; 260 struct ivpu_device *vdev = to_ivpu_device(dev); 261 262 ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n", 263 file_priv->ctx.id, current->comm, task_pid_nr(current)); 264 265 ivpu_file_priv_put(&file_priv); 266 } 267 268 static const struct drm_ioctl_desc ivpu_drm_ioctls[] = { 269 DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0), 270 DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0), 271 DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0), 272 DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0), 273 DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0), 274 DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0), 275 }; 276 277 static int ivpu_wait_for_ready(struct ivpu_device *vdev) 278 { 279 struct ivpu_ipc_consumer cons; 280 struct ivpu_ipc_hdr ipc_hdr; 281 unsigned long timeout; 282 int ret; 283 284 if (ivpu_test_mode == IVPU_TEST_MODE_FW_TEST) 285 return 0; 286 287 ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG); 288 289 timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot); 290 while (1) { 291 ret = ivpu_ipc_irq_handler(vdev); 292 if (ret) 293 break; 294 ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0); 295 if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout)) 296 break; 297 298 cond_resched(); 299 } 300 301 ivpu_ipc_consumer_del(vdev, &cons); 302 303 if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) { 304 ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n", 305 ipc_hdr.data_addr); 306 return -EIO; 307 } 308 309 if (!ret) 310 ivpu_info(vdev, "VPU ready message received successfully\n"); 311 else 312 ivpu_hw_diagnose_failure(vdev); 313 314 return ret; 315 } 316 317 /** 318 * ivpu_boot() - Start VPU firmware 319 * @vdev: VPU device 320 * 321 * This function is paired with ivpu_shutdown() but it doesn't power up the 322 * VPU because power up has to be called very early in ivpu_probe(). 323 */ 324 int ivpu_boot(struct ivpu_device *vdev) 325 { 326 int ret; 327 328 /* Update boot params located at first 4KB of FW memory */ 329 ivpu_fw_boot_params_setup(vdev, vdev->fw->mem->kvaddr); 330 331 ret = ivpu_hw_boot_fw(vdev); 332 if (ret) { 333 ivpu_err(vdev, "Failed to start the firmware: %d\n", ret); 334 return ret; 335 } 336 337 ret = ivpu_wait_for_ready(vdev); 338 if (ret) { 339 ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret); 340 return ret; 341 } 342 343 ivpu_hw_irq_clear(vdev); 344 enable_irq(vdev->irq); 345 ivpu_hw_irq_enable(vdev); 346 ivpu_ipc_enable(vdev); 347 return 0; 348 } 349 350 int ivpu_shutdown(struct ivpu_device *vdev) 351 { 352 int ret; 353 354 ivpu_hw_irq_disable(vdev); 355 disable_irq(vdev->irq); 356 ivpu_ipc_disable(vdev); 357 ivpu_mmu_disable(vdev); 358 359 ret = ivpu_hw_power_down(vdev); 360 if (ret) 361 ivpu_warn(vdev, "Failed to power down HW: %d\n", ret); 362 363 return ret; 364 } 365 366 static const struct file_operations ivpu_fops = { 367 .owner = THIS_MODULE, 368 DRM_ACCEL_FOPS, 369 }; 370 371 static const struct drm_driver driver = { 372 .driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL, 373 374 .open = ivpu_open, 375 .postclose = ivpu_postclose, 376 .prime_handle_to_fd = drm_gem_prime_handle_to_fd, 377 .prime_fd_to_handle = drm_gem_prime_fd_to_handle, 378 .gem_prime_import = ivpu_gem_prime_import, 379 .gem_prime_mmap = drm_gem_prime_mmap, 380 381 .ioctls = ivpu_drm_ioctls, 382 .num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls), 383 .fops = &ivpu_fops, 384 385 .name = DRIVER_NAME, 386 .desc = DRIVER_DESC, 387 .date = DRIVER_DATE, 388 .major = DRM_IVPU_DRIVER_MAJOR, 389 .minor = DRM_IVPU_DRIVER_MINOR, 390 }; 391 392 static int ivpu_irq_init(struct ivpu_device *vdev) 393 { 394 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); 395 int ret; 396 397 ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX); 398 if (ret < 0) { 399 ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret); 400 return ret; 401 } 402 403 vdev->irq = pci_irq_vector(pdev, 0); 404 405 ret = devm_request_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler, 406 IRQF_NO_AUTOEN, DRIVER_NAME, vdev); 407 if (ret) 408 ivpu_err(vdev, "Failed to request an IRQ %d\n", ret); 409 410 return ret; 411 } 412 413 static int ivpu_pci_init(struct ivpu_device *vdev) 414 { 415 struct pci_dev *pdev = to_pci_dev(vdev->drm.dev); 416 struct resource *bar0 = &pdev->resource[0]; 417 struct resource *bar4 = &pdev->resource[4]; 418 int ret; 419 420 ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0); 421 vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0); 422 if (IS_ERR(vdev->regv)) { 423 ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv); 424 return PTR_ERR(vdev->regv); 425 } 426 427 ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4); 428 vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4); 429 if (IS_ERR(vdev->regb)) { 430 ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb); 431 return PTR_ERR(vdev->regb); 432 } 433 434 ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits)); 435 if (ret) { 436 ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret); 437 return ret; 438 } 439 dma_set_max_seg_size(vdev->drm.dev, UINT_MAX); 440 441 /* Clear any pending errors */ 442 pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f); 443 444 /* VPU MTL does not require PCI spec 10m D3hot delay */ 445 if (ivpu_is_mtl(vdev)) 446 pdev->d3hot_delay = 0; 447 448 ret = pcim_enable_device(pdev); 449 if (ret) { 450 ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret); 451 return ret; 452 } 453 454 pci_set_master(pdev); 455 456 return 0; 457 } 458 459 static int ivpu_dev_init(struct ivpu_device *vdev) 460 { 461 int ret; 462 463 vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL); 464 if (!vdev->hw) 465 return -ENOMEM; 466 467 vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL); 468 if (!vdev->mmu) 469 return -ENOMEM; 470 471 vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL); 472 if (!vdev->fw) 473 return -ENOMEM; 474 475 vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL); 476 if (!vdev->ipc) 477 return -ENOMEM; 478 479 vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL); 480 if (!vdev->pm) 481 return -ENOMEM; 482 483 vdev->hw->ops = &ivpu_hw_mtl_ops; 484 vdev->hw->dma_bits = 38; 485 486 vdev->platform = IVPU_PLATFORM_INVALID; 487 vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID; 488 vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID; 489 atomic64_set(&vdev->unique_id_counter, 0); 490 xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC); 491 xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1); 492 lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key); 493 494 ret = ivpu_pci_init(vdev); 495 if (ret) { 496 ivpu_err(vdev, "Failed to initialize PCI device: %d\n", ret); 497 goto err_xa_destroy; 498 } 499 500 ret = ivpu_irq_init(vdev); 501 if (ret) { 502 ivpu_err(vdev, "Failed to initialize IRQs: %d\n", ret); 503 goto err_xa_destroy; 504 } 505 506 /* Init basic HW info based on buttress registers which are accessible before power up */ 507 ret = ivpu_hw_info_init(vdev); 508 if (ret) { 509 ivpu_err(vdev, "Failed to initialize HW info: %d\n", ret); 510 goto err_xa_destroy; 511 } 512 513 /* Power up early so the rest of init code can access VPU registers */ 514 ret = ivpu_hw_power_up(vdev); 515 if (ret) { 516 ivpu_err(vdev, "Failed to power up HW: %d\n", ret); 517 goto err_xa_destroy; 518 } 519 520 ret = ivpu_mmu_global_context_init(vdev); 521 if (ret) { 522 ivpu_err(vdev, "Failed to initialize global MMU context: %d\n", ret); 523 goto err_power_down; 524 } 525 526 ret = ivpu_mmu_init(vdev); 527 if (ret) { 528 ivpu_err(vdev, "Failed to initialize MMU device: %d\n", ret); 529 goto err_mmu_gctx_fini; 530 } 531 532 ret = ivpu_fw_init(vdev); 533 if (ret) { 534 ivpu_err(vdev, "Failed to initialize firmware: %d\n", ret); 535 goto err_mmu_gctx_fini; 536 } 537 538 ret = ivpu_ipc_init(vdev); 539 if (ret) { 540 ivpu_err(vdev, "Failed to initialize IPC: %d\n", ret); 541 goto err_fw_fini; 542 } 543 544 ret = ivpu_pm_init(vdev); 545 if (ret) { 546 ivpu_err(vdev, "Failed to initialize PM: %d\n", ret); 547 goto err_ipc_fini; 548 } 549 550 ret = ivpu_job_done_thread_init(vdev); 551 if (ret) { 552 ivpu_err(vdev, "Failed to initialize job done thread: %d\n", ret); 553 goto err_ipc_fini; 554 } 555 556 ret = ivpu_fw_load(vdev); 557 if (ret) { 558 ivpu_err(vdev, "Failed to load firmware: %d\n", ret); 559 goto err_job_done_thread_fini; 560 } 561 562 ret = ivpu_boot(vdev); 563 if (ret) { 564 ivpu_err(vdev, "Failed to boot: %d\n", ret); 565 goto err_job_done_thread_fini; 566 } 567 568 ivpu_pm_enable(vdev); 569 570 return 0; 571 572 err_job_done_thread_fini: 573 ivpu_job_done_thread_fini(vdev); 574 err_ipc_fini: 575 ivpu_ipc_fini(vdev); 576 err_fw_fini: 577 ivpu_fw_fini(vdev); 578 err_mmu_gctx_fini: 579 ivpu_mmu_global_context_fini(vdev); 580 err_power_down: 581 ivpu_hw_power_down(vdev); 582 if (IVPU_WA(d3hot_after_power_off)) 583 pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); 584 err_xa_destroy: 585 xa_destroy(&vdev->submitted_jobs_xa); 586 xa_destroy(&vdev->context_xa); 587 return ret; 588 } 589 590 static void ivpu_dev_fini(struct ivpu_device *vdev) 591 { 592 ivpu_pm_disable(vdev); 593 ivpu_shutdown(vdev); 594 if (IVPU_WA(d3hot_after_power_off)) 595 pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot); 596 ivpu_job_done_thread_fini(vdev); 597 ivpu_pm_cancel_recovery(vdev); 598 599 ivpu_ipc_fini(vdev); 600 ivpu_fw_fini(vdev); 601 ivpu_mmu_global_context_fini(vdev); 602 603 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa)); 604 xa_destroy(&vdev->submitted_jobs_xa); 605 drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa)); 606 xa_destroy(&vdev->context_xa); 607 } 608 609 static struct pci_device_id ivpu_pci_ids[] = { 610 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) }, 611 { } 612 }; 613 MODULE_DEVICE_TABLE(pci, ivpu_pci_ids); 614 615 static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id) 616 { 617 struct ivpu_device *vdev; 618 int ret; 619 620 vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm); 621 if (IS_ERR(vdev)) 622 return PTR_ERR(vdev); 623 624 pci_set_drvdata(pdev, vdev); 625 626 ret = ivpu_dev_init(vdev); 627 if (ret) { 628 dev_err(&pdev->dev, "Failed to initialize VPU device: %d\n", ret); 629 return ret; 630 } 631 632 ret = drm_dev_register(&vdev->drm, 0); 633 if (ret) { 634 dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret); 635 ivpu_dev_fini(vdev); 636 } 637 638 return ret; 639 } 640 641 static void ivpu_remove(struct pci_dev *pdev) 642 { 643 struct ivpu_device *vdev = pci_get_drvdata(pdev); 644 645 drm_dev_unplug(&vdev->drm); 646 ivpu_dev_fini(vdev); 647 } 648 649 static const struct dev_pm_ops ivpu_drv_pci_pm = { 650 SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb) 651 SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL) 652 }; 653 654 static const struct pci_error_handlers ivpu_drv_pci_err = { 655 .reset_prepare = ivpu_pm_reset_prepare_cb, 656 .reset_done = ivpu_pm_reset_done_cb, 657 }; 658 659 static struct pci_driver ivpu_pci_driver = { 660 .name = KBUILD_MODNAME, 661 .id_table = ivpu_pci_ids, 662 .probe = ivpu_probe, 663 .remove = ivpu_remove, 664 .driver = { 665 .pm = &ivpu_drv_pci_pm, 666 }, 667 .err_handler = &ivpu_drv_pci_err, 668 }; 669 670 module_pci_driver(ivpu_pci_driver); 671 672 MODULE_AUTHOR("Intel Corporation"); 673 MODULE_DESCRIPTION(DRIVER_DESC); 674 MODULE_LICENSE("GPL and additional rights"); 675 MODULE_VERSION(DRIVER_VERSION_STR); 676