/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the KFD driver during suspend or reset.
 * Once locked, the driver stops any further GPU execution, and process
 * creation (open) returns -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);
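
/*
 * Illustrative lifecycle of kfd_locked (a sketch of what the helpers
 * defined later in this file actually do, not an additional API):
 *
 *	kgd2kfd_suspend()	-> atomic_inc_return(&kfd_locked)
 *	kgd2kfd_resume()	-> atomic_dec_return(&kfd_locked)
 *	kfd_is_locked()		-> atomic_read(&kfd_locked) > 0
 *
 * Process-creation paths are expected to check kfd_is_locked() and fail
 * with -EAGAIN while any suspend or reset still holds a reference.
 */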

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.max_pasid_bits = 16,
	/* Max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.max_pasid_bits = 16,
	/* Max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.max_pasid_bits = 16,
	/* Max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info tonga_vf_device_info = {
	.asic_family = CHIP_TONGA,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 4,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.max_pasid_bits = 16,
	.max_no_of_hqd = 24,
	.doorbell_size = 8,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
};
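
/*
 * Note: the *_vf entries above describe the SR-IOV virtual-function
 * variants of the same ASICs. As the table shows, they mirror their
 * bare-metal counterparts except that needs_pci_atomics is false (for
 * Vega10 the two entries happen to be identical, since it does not
 * require PCIe atomics either way).
 */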

struct kfd_deviceid {
	unsigned short did;
	const struct kfd_device_info *device_info;
};

static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	{ 0x1304, &kaveri_device_info },	/* Kaveri */
	{ 0x1305, &kaveri_device_info },	/* Kaveri */
	{ 0x1306, &kaveri_device_info },	/* Kaveri */
	{ 0x1307, &kaveri_device_info },	/* Kaveri */
	{ 0x1309, &kaveri_device_info },	/* Kaveri */
	{ 0x130A, &kaveri_device_info },	/* Kaveri */
	{ 0x130B, &kaveri_device_info },	/* Kaveri */
	{ 0x130C, &kaveri_device_info },	/* Kaveri */
	{ 0x130D, &kaveri_device_info },	/* Kaveri */
	{ 0x130E, &kaveri_device_info },	/* Kaveri */
	{ 0x130F, &kaveri_device_info },	/* Kaveri */
	{ 0x1310, &kaveri_device_info },	/* Kaveri */
	{ 0x1311, &kaveri_device_info },	/* Kaveri */
	{ 0x1312, &kaveri_device_info },	/* Kaveri */
	{ 0x1313, &kaveri_device_info },	/* Kaveri */
	{ 0x1315, &kaveri_device_info },	/* Kaveri */
	{ 0x1316, &kaveri_device_info },	/* Kaveri */
	{ 0x1317, &kaveri_device_info },	/* Kaveri */
	{ 0x1318, &kaveri_device_info },	/* Kaveri */
	{ 0x131B, &kaveri_device_info },	/* Kaveri */
	{ 0x131C, &kaveri_device_info },	/* Kaveri */
	{ 0x131D, &kaveri_device_info },	/* Kaveri */
	{ 0x9870, &carrizo_device_info },	/* Carrizo */
	{ 0x9874, &carrizo_device_info },	/* Carrizo */
	{ 0x9875, &carrizo_device_info },	/* Carrizo */
	{ 0x9876, &carrizo_device_info },	/* Carrizo */
	{ 0x9877, &carrizo_device_info },	/* Carrizo */
	{ 0x15DD, &raven_device_info },		/* Raven */
#endif
	{ 0x67A0, &hawaii_device_info },	/* Hawaii */
	{ 0x67A1, &hawaii_device_info },	/* Hawaii */
	{ 0x67A2, &hawaii_device_info },	/* Hawaii */
	{ 0x67A8, &hawaii_device_info },	/* Hawaii */
	{ 0x67A9, &hawaii_device_info },	/* Hawaii */
	{ 0x67AA, &hawaii_device_info },	/* Hawaii */
	{ 0x67B0, &hawaii_device_info },	/* Hawaii */
	{ 0x67B1, &hawaii_device_info },	/* Hawaii */
	{ 0x67B8, &hawaii_device_info },	/* Hawaii */
	{ 0x67B9, &hawaii_device_info },	/* Hawaii */
	{ 0x67BA, &hawaii_device_info },	/* Hawaii */
	{ 0x67BE, &hawaii_device_info },	/* Hawaii */
	{ 0x6920, &tonga_device_info },		/* Tonga */
	{ 0x6921, &tonga_device_info },		/* Tonga */
	{ 0x6928, &tonga_device_info },		/* Tonga */
	{ 0x6929, &tonga_device_info },		/* Tonga */
	{ 0x692B, &tonga_device_info },		/* Tonga */
	{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
	{ 0x6938, &tonga_device_info },		/* Tonga */
	{ 0x6939, &tonga_device_info },		/* Tonga */
	{ 0x7300, &fiji_device_info },		/* Fiji */
	{ 0x730F, &fiji_vf_device_info },	/* Fiji vf */
	{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
	{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
	{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf */
	{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
	{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
	{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
	{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
	{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
	{ 0x6860, &vega10_device_info },	/* Vega10 */
	{ 0x6861, &vega10_device_info },	/* Vega10 */
	{ 0x6862, &vega10_device_info },	/* Vega10 */
	{ 0x6863, &vega10_device_info },	/* Vega10 */
	{ 0x6864, &vega10_device_info },	/* Vega10 */
	{ 0x6867, &vega10_device_info },	/* Vega10 */
	{ 0x6868, &vega10_device_info },	/* Vega10 */
	{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf */
	{ 0x687F, &vega10_device_info },	/* Vega10 */
};
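
/*
 * Support for a new PCI device ID is added by extending the table above;
 * e.g. a hypothetical (made-up) entry
 *
 *	{ 0xABCD, &vega10_device_info },
 *
 * would bind that ID to the Vega10 configuration. lookup_device_info()
 * below does a plain linear scan, so the table needs no particular
 * ordering.
 */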

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
		if (supported_devices[i].did == did) {
			WARN_ON(!supported_devices[i].device_info);
			return supported_devices[i].device_info;
		}
	}

	dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
		 did);

	return NULL;
}

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
	struct kfd_dev *kfd;
	int ret;
	const struct kfd_device_info *device_info =
					lookup_device_info(pdev->device);

	if (!device_info) {
		dev_err(kfd_device, "kgd2kfd_probe failed\n");
		return NULL;
	}

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	ret = pci_enable_atomic_ops_to_root(pdev,
			PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
			PCI_EXP_DEVCAP2_ATOMIC_COMP64);
	if (device_info->needs_pci_atomics && ret < 0) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	return kfd;
}
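
/*
 * kfd_cwsr_init - enable compute wave save/restore (CWSR) when both the
 * cwsr_enable module parameter and the ASIC support it. Pre-Vega10 parts
 * get the gfx8 trap-handler image, everything newer the gfx9 one; the
 * BUILD_BUG_ON checks below guarantee each image fits in a single page.
 */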
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify the module parameter for the number of mapped processes */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (kfd->kfd2kgd->init_gtt_mem_allocation(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto out;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	kfd->dbgmgr = NULL;

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		 kfd->dqm->sched_policy);

	goto out;

kfd_resume_error:
device_iommu_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}
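
/*
 * kgd2kfd_device_exit - tear down everything kgd2kfd_device_init() set
 * up, in reverse order of initialization. The kfd structure itself is
 * freed unconditionally, even if initialization never completed.
 */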
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;
	kgd2kfd_suspend(kfd);

	/* hold dqm->lock to prevent further execution */
	dqm_lock(kfd->dqm);

	kfd_signal_reset_event(kfd);
	return 0;
}
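
/*
 * Reset protocol: kgd2kfd_pre_reset() above suspends the device (which
 * takes a kfd_locked reference) and leaves dqm->lock held across the GPU
 * reset; kgd2kfd_post_reset() below releases the lock, restarts the
 * device and drops that reference, warning if it was not the last one.
 */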
/*
 * FIXME: KFD cannot resume existing processes for now. All existing
 * processes are kept in an evicted state, and we wait for them to
 * terminate.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	dqm_unlock(kfd->dqm);

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count != 0, "KFD reset ref. error");
	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return;

	/* For the first KFD device, suspend all KFD processes */
	if (atomic_inc_return(&kfd_locked) == 1)
		kfd_suspend_all_processes();

	kfd->dqm->ops.stop(kfd->dqm);

	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	count = atomic_dec_return(&kfd_locked);
	WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
	if (count == 0)
		ret = kfd_resume_all_processes();

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock(&kfd->interrupt_lock);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock(&kfd->interrupt_lock);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running, so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running, so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}
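
/*
 * A typical KGD-side pairing of the two helpers above (illustrative
 * sketch only; the real callers live in the amdgpu KGD, e.g. in its
 * userptr-eviction paths):
 *
 *	if (!kgd2kfd_quiesce_mm(mm)) {
 *		(invalidate or move the process BOs)
 *		kgd2kfd_resume_mm(mm);
 *	}
 */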
/**
 * kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *	prepare for safe eviction of KFD BOs that belong to the specified
 *	process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
		 kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						 unsigned int bit_num,
						 unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
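
/*
 * Worked example for the two helpers above: kgd2kfd_device_init() creates
 * the sub-allocator with a 512-byte chunk_size, so chunk (bit) number 3
 * maps to byte offset 3 * 512 = 1536 from gtt_start_gpu_addr on the GPU
 * side and from gtt_start_cpu_ptr on the CPU side; both address the same
 * underlying GTT memory.
 */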
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
				   kfd->gtt_sa_num_of_chunks,
				   start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
		 (uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					   kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous with the previous ones)
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		 (*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
	     found <= (*mem_obj)->range_end;
	     found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	/* Free the object itself, not the caller's pointer to it */
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
		 mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
	     bit <= mem_obj->range_end;
	     bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

/* This function sends a packet to the HIQ to hang the HWS, which will
 * trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled\n");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif